1 #!/usr/bin/python -O
2 # Copyright 1999-2006 Gentoo Foundation
3 # Distributed under the terms of the GNU General Public License v2
4 # $Id: emerge 5976 2007-02-17 09:14:53Z genone $
5
6 import array
7 from collections import deque
8 import fcntl
9 import formatter
10 import logging
11 import pwd
12 import select
13 import shlex
14 import shutil
15 import signal
16 import sys
17 import textwrap
18 import urlparse
19 import weakref
20 import gc
21 import os, stat
22 import platform
23
24 try:
25         import portage
26 except ImportError:
27         from os import path as osp
28         sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
29         import portage
30
31 from portage import digraph
32 from portage.const import NEWS_LIB_PATH
33
34 import _emerge.help
35 import portage.xpak, commands, errno, re, socket, time, types
36 from portage.output import blue, bold, colorize, darkblue, darkgreen, darkred, green, \
37         nc_len, red, teal, turquoise, xtermTitle, \
38         xtermTitleReset, yellow
39 from portage.output import create_color_func
40 good = create_color_func("GOOD")
41 bad = create_color_func("BAD")
42 # white looks bad on terminals with white background
43 from portage.output import bold as white
44
45 import portage.elog
46 import portage.dep
47 portage.dep._dep_check_strict = True
48 import portage.util
49 import portage.locks
50 import portage.exception
51 from portage.data import secpass
52 from portage.elog.messages import eerror
53 from portage.util import normalize_path as normpath
54 from portage.util import writemsg, writemsg_level
55 from portage.sets import load_default_config, SETPREFIX
56 from portage.sets.base import InternalPackageSet
57
58 from itertools import chain, izip
59 from UserDict import DictMixin
60
61 try:
62         import cPickle as pickle
63 except ImportError:
64         import pickle
65
66 try:
67         import cStringIO as StringIO
68 except ImportError:
69         import StringIO
70
71 class stdout_spinner(object):
72         scroll_msgs = [
73                 "Gentoo Rocks ("+platform.system()+")",
74                 "Thank you for using Gentoo. :)",
75                 "Are you actually trying to read this?",
76                 "How many times have you stared at this?",
77                 "We are generating the cache right now",
78                 "You are paying too much attention.",
79                 "A theory is better than its explanation.",
80                 "Phasers locked on target, Captain.",
81                 "Thrashing is just virtual crashing.",
82                 "To be is to program.",
83                 "Real Users hate Real Programmers.",
84                 "When all else fails, read the instructions.",
85                 "Functionality breeds Contempt.",
86                 "The future lies ahead.",
87                 "3.1415926535897932384626433832795028841971694",
88                 "Sometimes insanity is the only alternative.",
89                 "Inaccuracy saves a world of explanation.",
90         ]
91
92         twirl_sequence = "/-\\|/-\\|/-\\|/-\\|\\-/|\\-/|\\-/|\\-/|"
93
94         def __init__(self):
95                 self.spinpos = 0
96                 self.update = self.update_twirl
97                 self.scroll_sequence = self.scroll_msgs[
98                         int(time.time() * 100) % len(self.scroll_msgs)]
99                 self.last_update = 0
100                 self.min_display_latency = 0.05
101
102         def _return_early(self):
103                 """
104                 Flushing output to the tty too frequently wastes cpu time. Therefore,
105                 each update* method should return without doing any output when this
106                 method returns True.
107                 """
108                 cur_time = time.time()
109                 if cur_time - self.last_update < self.min_display_latency:
110                         return True
111                 self.last_update = cur_time
112                 return False
113
114         def update_basic(self):
115                 self.spinpos = (self.spinpos + 1) % 500
116                 if self._return_early():
117                         return
118                 if (self.spinpos % 100) == 0:
119                         if self.spinpos == 0:
120                                 sys.stdout.write(". ")
121                         else:
122                                 sys.stdout.write(".")
123                 sys.stdout.flush()
124
125         def update_scroll(self):
126                 if self._return_early():
127                         return
128                 if(self.spinpos >= len(self.scroll_sequence)):
129                         sys.stdout.write(darkgreen(" \b\b\b" + self.scroll_sequence[
130                                 len(self.scroll_sequence) - 1 - (self.spinpos % len(self.scroll_sequence))]))
131                 else:
132                         sys.stdout.write(green("\b " + self.scroll_sequence[self.spinpos]))
133                 sys.stdout.flush()
134                 self.spinpos = (self.spinpos + 1) % (2 * len(self.scroll_sequence))
135
136         def update_twirl(self):
137                 self.spinpos = (self.spinpos + 1) % len(self.twirl_sequence)
138                 if self._return_early():
139                         return
140                 sys.stdout.write("\b\b " + self.twirl_sequence[self.spinpos])
141                 sys.stdout.flush()
142
143         def update_quiet(self):
144                 return
145
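# Illustrative usage sketch (not part of the original module): a stdout_spinner
# is driven by calling update() repeatedly; update can be rebound to any of the
# update_* methods above, and update_quiet() silences it entirely.
#
#   spinner = stdout_spinner()
#   spinner.update = spinner.update_basic   # emits a dot roughly every 100 calls,
#   for _ in xrange(1000):                  # rate-limited by min_display_latency
#       spinner.update()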
146 def userquery(prompt, responses=None, colours=None):
147         """Displays a prompt and a set of responses, then waits for user input,
148         which is checked against the responses; the first match is returned.
149         An empty response will match the first value in responses.  The
150         input buffer is *not* cleared prior to the prompt!
151
152         prompt: a String.
153         responses: a List of Strings.
154         colours: a List of Functions taking and returning a String, used to
155         process the responses for display. Typically these will be functions
156         like red() but could be e.g. lambda x: "DisplayString".
157         If responses is omitted, defaults to ["Yes", "No"], coloured using the PROMPT_CHOICE_DEFAULT and PROMPT_CHOICE_OTHER colour classes.
158         If only colours is omitted, defaults to [bold, ...].
159
160         Returns a member of the List responses. (If called without optional
161         arguments, returns "Yes" or "No".)
162         KeyboardInterrupt is converted to SystemExit to avoid tracebacks being
163         printed."""
164         if responses is None:
165                 responses = ["Yes", "No"]
166                 colours = [
167                         create_color_func("PROMPT_CHOICE_DEFAULT"),
168                         create_color_func("PROMPT_CHOICE_OTHER")
169                 ]
170         elif colours is None:
171                 colours=[bold]
172         colours=(colours*len(responses))[:len(responses)]
173         print bold(prompt),
174         try:
175                 while True:
176                         response=raw_input("["+"/".join([colours[i](responses[i]) for i in range(len(responses))])+"] ")
177                         for key in responses:
178                                 # An empty response will match the first value in responses.
179                                 if response.upper()==key[:len(response)].upper():
180                                         return key
181                         print "Sorry, response '%s' not understood." % response,
182         except (EOFError, KeyboardInterrupt):
183                 print "Interrupted."
184                 sys.exit(1)
185
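# Example (illustrative only): with the default arguments userquery() asks a
# Yes/No question and returns the matching member of responses; EOF or Ctrl-C
# exits via sys.exit(1) as noted in the docstring.
#
#   choice = userquery("Continue with the merge?")  # prints: Continue with the merge? [Yes/No]
#   if choice == "No":
#           sys.exit(1)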
186 actions = frozenset([
187 "clean", "config", "depclean",
188 "info", "list-sets", "metadata",
189 "prune", "regen",  "search",
190 "sync",  "unmerge",
191 ])
192 options=[
193 "--ask",          "--alphabetical",
194 "--buildpkg",     "--buildpkgonly",
195 "--changelog",    "--columns",
196 "--complete-graph",
197 "--debug",        "--deep",
198 "--digest",
199 "--emptytree",
200 "--fetchonly",    "--fetch-all-uri",
201 "--getbinpkg",    "--getbinpkgonly",
202 "--help",         "--ignore-default-opts",
203 "--keep-going",
204 "--noconfmem",
205 "--newuse",       "--nocolor",
206 "--nodeps",       "--noreplace",
207 "--nospinner",    "--oneshot",
208 "--onlydeps",     "--pretend",
209 "--quiet",        "--resume",
210 "--searchdesc",   "--selective",
211 "--skipfirst",
212 "--tree",
213 "--update",
214 "--usepkg",       "--usepkgonly",
215 "--verbose",      "--version"
216 ]
217
218 shortmapping={
219 "1":"--oneshot",
220 "a":"--ask",
221 "b":"--buildpkg",  "B":"--buildpkgonly",
222 "c":"--clean",     "C":"--unmerge",
223 "d":"--debug",     "D":"--deep",
224 "e":"--emptytree",
225 "f":"--fetchonly", "F":"--fetch-all-uri",
226 "g":"--getbinpkg", "G":"--getbinpkgonly",
227 "h":"--help",
228 "k":"--usepkg",    "K":"--usepkgonly",
229 "l":"--changelog",
230 "n":"--noreplace", "N":"--newuse",
231 "o":"--onlydeps",  "O":"--nodeps",
232 "p":"--pretend",   "P":"--prune",
233 "q":"--quiet",
234 "s":"--search",    "S":"--searchdesc",
235 "t":"--tree",
236 "u":"--update",
237 "v":"--verbose",   "V":"--version"
238 }
239
240 def emergelog(xterm_titles, mystr, short_msg=None):
241         if xterm_titles and short_msg:
242                 if "HOSTNAME" in os.environ:
243                         short_msg = os.environ["HOSTNAME"]+": "+short_msg
244                 xtermTitle(short_msg)
245         try:
246                 file_path = "/var/log/emerge.log"
247                 mylogfile = open(file_path, "a")
248                 portage.util.apply_secpass_permissions(file_path,
249                         uid=portage.portage_uid, gid=portage.portage_gid,
250                         mode=0660)
251                 mylock = None
252                 try:
253                         mylock = portage.locks.lockfile(mylogfile)
254                         # seek because we may have gotten held up by the lock.
255                         # if so, we may not be positioned at the end of the file.
256                         mylogfile.seek(0, 2)
257                         mylogfile.write(str(time.time())[:10]+": "+mystr+"\n")
258                         mylogfile.flush()
259                 finally:
260                         if mylock:
261                                 portage.locks.unlockfile(mylock)
262                         mylogfile.close()
263         except (IOError,OSError,portage.exception.PortageException), e:
264                 if secpass >= 1:
265                         print >> sys.stderr, "emergelog():",e
266
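# Illustrative call (not from the original source): emergelog() appends a
# timestamped entry to /var/log/emerge.log and, when xterm_titles is true,
# mirrors short_msg to the terminal title.
#
#   emergelog(xterm_titles=True,
#           mystr=">>> emerge (1 of 3) app-editors/vim-7.1 to /",
#           short_msg="emerge: app-editors/vim")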
267 def countdown(secs=5, doing="Starting"):
268         if secs:
269                 print ">>> Waiting",secs,"seconds before starting..."
270                 print ">>> (Control-C to abort)...\n"+doing+" in: ",
271                 ticks=range(secs)
272                 ticks.reverse()
273                 for sec in ticks:
274                         sys.stdout.write(colorize("UNMERGE_WARN", str(sec+1)+" "))
275                         sys.stdout.flush()
276                         time.sleep(1)
277                 print
278
279 # formats a size given in bytes nicely
280 def format_size(mysize):
281         if type(mysize) not in [types.IntType,types.LongType]:
282                 return str(mysize)
283         if 0 != mysize % 1024:
284                 # Always round up to the next kB so that it doesn't show 0 kB when
285                 # some small file still needs to be fetched.
286                 mysize += 1024 - mysize % 1024
287         mystr=str(mysize/1024)
288         mycount=len(mystr)
289         while (mycount > 3):
290                 mycount-=3
291                 mystr=mystr[:mycount]+","+mystr[mycount:]
292         return mystr+" kB"
293
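# Worked examples (illustrative, derived from the function above): byte counts
# are rounded up to the next kB and digit-grouped with commas.
#
#   format_size(1)        -> "1 kB"       (1 byte rounds up to a full kB)
#   format_size(2500000)  -> "2,442 kB"
#   format_size("n/a")    -> "n/a"        (non-integer input is returned as-is)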
294
295 def getgccversion(chost):
296         """
297         rtype: C{str}
298         return:  the current in-use gcc version
299         """
300
301         gcc_ver_command = 'gcc -dumpversion'
302         gcc_ver_prefix = 'gcc-'
303
304         gcc_not_found_error = red(
305         "!!! No gcc found. You probably need to 'source /etc/profile'\n" +
306         "!!! to update the environment of this terminal and possibly\n" +
307         "!!! other terminals also.\n"
308         )
309
310         mystatus, myoutput = commands.getstatusoutput("gcc-config -c")
311         if mystatus == os.EX_OK and myoutput.startswith(chost + "-"):
312                 return myoutput.replace(chost + "-", gcc_ver_prefix, 1)
313
314         mystatus, myoutput = commands.getstatusoutput(
315                 chost + "-" + gcc_ver_command)
316         if mystatus == os.EX_OK:
317                 return gcc_ver_prefix + myoutput
318
319         mystatus, myoutput = commands.getstatusoutput(gcc_ver_command)
320         if mystatus == os.EX_OK:
321                 return gcc_ver_prefix + myoutput
322
323         portage.writemsg(gcc_not_found_error, noiselevel=-1)
324         return "[unavailable]"
325
326 def getportageversion(portdir, target_root, profile, chost, vardb):
327         profilever = "unavailable"
328         if profile:
329                 realpath = os.path.realpath(profile)
330                 basepath   = os.path.realpath(os.path.join(portdir, "profiles"))
331                 if realpath.startswith(basepath):
332                         profilever = realpath[1 + len(basepath):]
333                 else:
334                         try:
335                                 profilever = "!" + os.readlink(profile)
336                         except (OSError):
337                                 pass
338                 del realpath, basepath
339
340         libcver=[]
341         libclist  = vardb.match("virtual/libc")
342         libclist += vardb.match("virtual/glibc")
343         libclist  = portage.util.unique_array(libclist)
344         for x in libclist:
345                 xs=portage.catpkgsplit(x)
346                 if libcver:
347                         libcver+=","+"-".join(xs[1:])
348                 else:
349                         libcver="-".join(xs[1:])
350         if libcver==[]:
351                 libcver="unavailable"
352
353         gccver = getgccversion(chost)
354         unameout=platform.release()+" "+platform.machine()
355
356         return "Portage " + portage.VERSION +" ("+profilever+", "+gccver+", "+libcver+", "+unameout+")"
357
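# Example output shape (values are purely illustrative): getportageversion()
# returns a single line such as
# "Portage 2.1.4 (default-linux/x86/2007.0, gcc-4.1.2, glibc-2.6.1-r0, 2.6.23-gentoo-r3 i686)",
# i.e. the portage version followed by profile, gcc, libc and uname fields.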
358 def create_depgraph_params(myopts, myaction):
359         #configure emerge engine parameters
360         #
361         # self:      include _this_ package regardless of whether it is merged.
362         # selective: exclude the package if it is merged
363         # recurse:   go into the dependencies
364         # deep:      go into the dependencies of already merged packages
365         # empty:     pretend nothing is merged
366         # complete:  completely account for all known dependencies
367         # remove:    build graph for use in removing packages
368         myparams = set(["recurse"])
369
370         if myaction == "remove":
371                 myparams.add("remove")
372                 myparams.add("complete")
373                 return myparams
374
375         if "--update" in myopts or \
376                 "--newuse" in myopts or \
377                 "--reinstall" in myopts or \
378                 "--noreplace" in myopts:
379                 myparams.add("selective")
380         if "--emptytree" in myopts:
381                 myparams.add("empty")
382                 myparams.discard("selective")
383         if "--nodeps" in myopts:
384                 myparams.discard("recurse")
385         if "--deep" in myopts:
386                 myparams.add("deep")
387         if "--complete-graph" in myopts:
388                 myparams.add("complete")
389         return myparams
390
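# Example (illustrative): the parameter set produced for a deep update.  The
# myopts argument only needs to support membership tests, so a dict of parsed
# options (or a list of option strings) works.
#
#   sorted(create_depgraph_params({"--update": True, "--deep": True}, ""))
#   # -> ['deep', 'recurse', 'selective']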
391 # search functionality
392 class search(object):
393
394         #
395         # class constants
396         #
397         VERSION_SHORT=1
398         VERSION_RELEASE=2
399
400         #
401         # public interface
402         #
403         def __init__(self, root_config, spinner, searchdesc,
404                 verbose, usepkg, usepkgonly):
405                 """Searches the available and installed packages for the supplied search key.
406                 The list of available and installed packages is created at object instantiation.
407                 This makes successive searches faster."""
408                 self.settings = root_config.settings
409                 self.vartree = root_config.trees["vartree"]
410                 self.spinner = spinner
411                 self.verbose = verbose
412                 self.searchdesc = searchdesc
413                 self.root_config = root_config
414                 self.setconfig = root_config.setconfig
415                 self.matches = {"pkg" : []}
416                 self.mlen = 0
417
418                 def fake_portdb():
419                         pass
420                 self.portdb = fake_portdb
421                 for attrib in ("aux_get", "cp_all",
422                         "xmatch", "findname", "getFetchMap"):
423                         setattr(fake_portdb, attrib, getattr(self, "_"+attrib))
424
425                 self._dbs = []
426
427                 portdb = root_config.trees["porttree"].dbapi
428                 bindb = root_config.trees["bintree"].dbapi
429                 vardb = root_config.trees["vartree"].dbapi
430
431                 if not usepkgonly and portdb._have_root_eclass_dir:
432                         self._dbs.append(portdb)
433
434                 if (usepkg or usepkgonly) and bindb.cp_all():
435                         self._dbs.append(bindb)
436
437                 self._dbs.append(vardb)
438                 self._portdb = portdb
439
440         def _cp_all(self):
441                 cp_all = set()
442                 for db in self._dbs:
443                         cp_all.update(db.cp_all())
444                 return list(sorted(cp_all))
445
446         def _aux_get(self, *args, **kwargs):
447                 for db in self._dbs:
448                         try:
449                                 return db.aux_get(*args, **kwargs)
450                         except KeyError:
451                                 pass
452                 raise
453
454         def _findname(self, *args, **kwargs):
455                 for db in self._dbs:
456                         if db is not self._portdb:
457                                 # We don't want findname to return anything
458                                 # unless it's an ebuild in a portage tree.
459                                 # Otherwise, it's already built and we don't
460                                 # care about it.
461                                 continue
462                         func = getattr(db, "findname", None)
463                         if func:
464                                 value = func(*args, **kwargs)
465                                 if value:
466                                         return value
467                 return None
468
469         def _getFetchMap(self, *args, **kwargs):
470                 for db in self._dbs:
471                         func = getattr(db, "getFetchMap", None)
472                         if func:
473                                 value = func(*args, **kwargs)
474                                 if value:
475                                         return value
476                 return {}
477
478         def _visible(self, db, cpv, metadata):
479                 installed = db is self.vartree.dbapi
480                 built = installed or db is not self._portdb
481                 pkg_type = "ebuild"
482                 if installed:
483                         pkg_type = "installed"
484                 elif built:
485                         pkg_type = "binary"
486                 return visible(self.settings,
487                         Package(type_name=pkg_type, root_config=self.root_config,
488                         cpv=cpv, built=built, installed=installed, metadata=metadata))
489
490         def _xmatch(self, level, atom):
491                 """
492                 This method does not expand old-style virtuals because it
493                 is restricted to returning matches for a single ${CATEGORY}/${PN}
494                 and old-style virtual matches are unreliable for that when querying
495                 multiple package databases. If necessary, old-style virtual
496                 expansion can be performed on atoms prior to calling this method.
497                 """
498                 cp = portage.dep_getkey(atom)
499                 if level == "match-all":
500                         matches = set()
501                         for db in self._dbs:
502                                 if hasattr(db, "xmatch"):
503                                         matches.update(db.xmatch(level, atom))
504                                 else:
505                                         matches.update(db.match(atom))
506                         result = list(x for x in matches if portage.cpv_getkey(x) == cp)
507                         db._cpv_sort_ascending(result)
508                 elif level == "match-visible":
509                         matches = set()
510                         for db in self._dbs:
511                                 if hasattr(db, "xmatch"):
512                                         matches.update(db.xmatch(level, atom))
513                                 else:
514                                         db_keys = list(db._aux_cache_keys)
515                                         for cpv in db.match(atom):
516                                                 metadata = izip(db_keys,
517                                                         db.aux_get(cpv, db_keys))
518                                                 if not self._visible(db, cpv, metadata):
519                                                         continue
520                                                 matches.add(cpv)
521                         result = list(x for x in matches if portage.cpv_getkey(x) == cp)
522                         db._cpv_sort_ascending(result)
523                 elif level == "bestmatch-visible":
524                         result = None
525                         for db in self._dbs:
526                                 if hasattr(db, "xmatch"):
527                                         cpv = db.xmatch("bestmatch-visible", atom)
528                                         if not cpv or portage.cpv_getkey(cpv) != cp:
529                                                 continue
530                                         if not result or cpv == portage.best([cpv, result]):
531                                                 result = cpv
532                                 else:
533                                         db_keys = Package.metadata_keys
534                                         # break out of this loop with highest visible
535                                         # match, checked in descending order
536                                         for cpv in reversed(db.match(atom)):
537                                                 if portage.cpv_getkey(cpv) != cp:
538                                                         continue
539                                                 metadata = izip(db_keys,
540                                                         db.aux_get(cpv, db_keys))
541                                                 if not self._visible(db, cpv, metadata):
542                                                         continue
543                                                 if not result or cpv == portage.best([cpv, result]):
544                                                         result = cpv
545                                                 break
546                 else:
547                         raise NotImplementedError(level)
548                 return result
549
550         def execute(self,searchkey):
551                 """Performs the search for the supplied search key"""
552                 match_category = 0
553                 self.searchkey=searchkey
554                 self.packagematches = []
555                 if self.searchdesc:
556                         self.searchdesc=1
557                         self.matches = {"pkg":[], "desc":[], "set":[]}
558                 else:
559                         self.searchdesc=0
560                         self.matches = {"pkg":[], "set":[]}
561                 print "Searching...   ",
562
563                 regexsearch = False
564                 if self.searchkey.startswith('%'):
565                         regexsearch = True
566                         self.searchkey = self.searchkey[1:]
567                 if self.searchkey.startswith('@'):
568                         match_category = 1
569                         self.searchkey = self.searchkey[1:]
570                 if regexsearch:
571                         self.searchre=re.compile(self.searchkey,re.I)
572                 else:
573                         self.searchre=re.compile(re.escape(self.searchkey), re.I)
574                 for package in self.portdb.cp_all():
575                         self.spinner.update()
576
577                         if match_category:
578                                 match_string  = package[:]
579                         else:
580                                 match_string  = package.split("/")[-1]
581
582                         masked=0
583                         if self.searchre.search(match_string):
584                                 if not self.portdb.xmatch("match-visible", package):
585                                         masked=1
586                                 self.matches["pkg"].append([package,masked])
587                         elif self.searchdesc: # DESCRIPTION searching
588                                 full_package = self.portdb.xmatch("bestmatch-visible", package)
589                                 if not full_package:
590                                         #no match found; we don't want to query description
591                                         full_package = portage.best(
592                                                 self.portdb.xmatch("match-all", package))
593                                         if not full_package:
594                                                 continue
595                                         else:
596                                                 masked=1
597                                 try:
598                                         full_desc = self.portdb.aux_get(
599                                                 full_package, ["DESCRIPTION"])[0]
600                                 except KeyError:
601                                         print "emerge: search: aux_get() failed, skipping"
602                                         continue
603                                 if self.searchre.search(full_desc):
604                                         self.matches["desc"].append([full_package,masked])
605
606                 self.sdict = self.setconfig.getSets()
607                 for setname in self.sdict:
608                         self.spinner.update()
609                         if match_category:
610                                 match_string = setname
611                         else:
612                                 match_string = setname.split("/")[-1]
613                         
614                         if self.searchre.search(match_string):
615                                 self.matches["set"].append([setname, False])
616                         elif self.searchdesc:
617                                 if self.searchre.search(
618                                         self.sdict[setname].getMetadata("DESCRIPTION")):
619                                         self.matches["set"].append([setname, False])
620                         
621                 self.mlen=0
622                 for mtype in self.matches:
623                         self.matches[mtype].sort()
624                         self.mlen += len(self.matches[mtype])
625
626         def addCP(self, cp):
627                 if not self.portdb.xmatch("match-all", cp):
628                         return
629                 masked = 0
630                 if not self.portdb.xmatch("bestmatch-visible", cp):
631                         masked = 1
632                 self.matches["pkg"].append([cp, masked])
633                 self.mlen += 1
634
635         def output(self):
636                 """Outputs the results of the search."""
637                 print "\b\b  \n[ Results for search key : "+white(self.searchkey)+" ]"
638                 print "[ Applications found : "+white(str(self.mlen))+" ]"
639                 print " "
640                 vardb = self.vartree.dbapi
641                 for mtype in self.matches:
642                         for match,masked in self.matches[mtype]:
643                                 full_package = None
644                                 if mtype == "pkg":
645                                         catpack = match
646                                         full_package = self.portdb.xmatch(
647                                                 "bestmatch-visible", match)
648                                         if not full_package:
649                                                 #no match found; we don't want to query description
650                                                 masked=1
651                                                 full_package = portage.best(
652                                                         self.portdb.xmatch("match-all",match))
653                                 elif mtype == "desc":
654                                         full_package = match
655                                         match        = portage.cpv_getkey(match)
656                                 elif mtype == "set":
657                                         print green("*")+"  "+white(match)
658                                         print "     ", darkgreen("Description:")+"  ", self.sdict[match].getMetadata("DESCRIPTION")
659                                         print
660                                 if full_package:
661                                         try:
662                                                 desc, homepage, license = self.portdb.aux_get(
663                                                         full_package, ["DESCRIPTION","HOMEPAGE","LICENSE"])
664                                         except KeyError:
665                                                 print "emerge: search: aux_get() failed, skipping"
666                                                 continue
667                                         if masked:
668                                                 print green("*")+"  "+white(match)+" "+red("[ Masked ]")
669                                         else:
670                                                 print green("*")+"  "+white(match)
671                                         myversion = self.getVersion(full_package, search.VERSION_RELEASE)
672
673                                         mysum = [0,0]
674                                         file_size_str = None
675                                         mycat = match.split("/")[0]
676                                         mypkg = match.split("/")[1]
677                                         mycpv = match + "-" + myversion
678                                         myebuild = self.portdb.findname(mycpv)
679                                         if myebuild:
680                                                 pkgdir = os.path.dirname(myebuild)
681                                                 from portage import manifest
682                                                 mf = manifest.Manifest(
683                                                         pkgdir, self.settings["DISTDIR"])
684                                                 try:
685                                                         uri_map = self.portdb.getFetchMap(mycpv)
686                                                 except portage.exception.InvalidDependString, e:
687                                                         file_size_str = "Unknown (%s)" % (e,)
688                                                         del e
689                                                 else:
690                                                         try:
691                                                                 mysum[0] = mf.getDistfilesSize(uri_map)
692                                                         except KeyError, e:
693                                                                 file_size_str = "Unknown (missing " + \
694                                                                         "digest for %s)" % (e,)
695                                                                 del e
696
697                                         available = False
698                                         for db in self._dbs:
699                                                 if db is not vardb and \
700                                                         db.cpv_exists(mycpv):
701                                                         available = True
702                                                         if not myebuild and hasattr(db, "bintree"):
703                                                                 myebuild = db.bintree.getname(mycpv)
704                                                                 try:
705                                                                         mysum[0] = os.stat(myebuild).st_size
706                                                                 except OSError:
707                                                                         myebuild = None
708                                                         break
709
710                                         if myebuild and file_size_str is None:
711                                                 mystr = str(mysum[0] / 1024)
712                                                 mycount = len(mystr)
713                                                 while (mycount > 3):
714                                                         mycount -= 3
715                                                         mystr = mystr[:mycount] + "," + mystr[mycount:]
716                                                 file_size_str = mystr + " kB"
717
718                                         if self.verbose:
719                                                 if available:
720                                                         print "     ", darkgreen("Latest version available:"),myversion
721                                                 print "     ", self.getInstallationStatus(mycat+'/'+mypkg)
722                                                 if myebuild:
723                                                         print "      %s %s" % \
724                                                                 (darkgreen("Size of files:"), file_size_str)
725                                                 print "     ", darkgreen("Homepage:")+"     ",homepage
726                                                 print "     ", darkgreen("Description:")+"  ",desc
727                                                 print "     ", darkgreen("License:")+"      ",license
728                                                 print
729         #
730         # private interface
731         #
732         def getInstallationStatus(self,package):
733                 installed_package = self.vartree.dep_bestmatch(package)
734                 result = ""
735                 version = self.getVersion(installed_package,search.VERSION_RELEASE)
736                 if len(version) > 0:
737                         result = darkgreen("Latest version installed:")+" "+version
738                 else:
739                         result = darkgreen("Latest version installed:")+" [ Not Installed ]"
740                 return result
741
742         def getVersion(self,full_package,detail):
743                 if len(full_package) > 1:
744                         package_parts = portage.catpkgsplit(full_package)
745                         if detail == search.VERSION_RELEASE and package_parts[3] != 'r0':
746                                 result = package_parts[2]+ "-" + package_parts[3]
747                         else:
748                                 result = package_parts[2]
749                 else:
750                         result = ""
751                 return result
752
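# Rough usage sketch (root_config and spinner are assumed to come from the
# setup code elsewhere in this module): a search instance is created once and
# can then serve several queries against the same set of databases.
#
#   s = search(root_config, spinner, searchdesc=True, verbose=False,
#           usepkg=False, usepkgonly=False)
#   s.execute("vim")        # a leading '%' would make the key a regular expression
#   s.output()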
753 class RootConfig(object):
754         """This is used internally by depgraph to track information about a
755         particular $ROOT."""
756
757         pkg_tree_map = {
758                 "ebuild"    : "porttree",
759                 "binary"    : "bintree",
760                 "installed" : "vartree"
761         }
762
763         tree_pkg_map = {}
764         for k, v in pkg_tree_map.iteritems():
765                 tree_pkg_map[v] = k
766
767         def __init__(self, settings, trees, setconfig):
768                 self.trees = trees
769                 self.settings = settings
770                 self.iuse_implicit = tuple(sorted(settings._get_implicit_iuse()))
771                 self.root = self.settings["ROOT"]
772                 self.setconfig = setconfig
773                 self.sets = self.setconfig.getSets()
774                 self.visible_pkgs = PackageVirtualDbapi(self.settings)
775
776 def create_world_atom(pkg, args_set, root_config):
777         """Create a new atom for the world file if one does not exist.  If the
778         argument atom is precise enough to identify a specific slot then a slot
779         atom will be returned. Atoms that are in the system set may also be stored
780         in world since system atoms can only match one slot while world atoms can
781         be greedy with respect to slots.  Unslotted system packages will not be
782         stored in world."""
783
784         arg_atom = args_set.findAtomForPackage(pkg)
785         if not arg_atom:
786                 return None
787         cp = portage.dep_getkey(arg_atom)
788         new_world_atom = cp
789         sets = root_config.sets
790         portdb = root_config.trees["porttree"].dbapi
791         vardb = root_config.trees["vartree"].dbapi
792         available_slots = set(portdb.aux_get(cpv, ["SLOT"])[0] \
793                 for cpv in portdb.match(cp))
794         slotted = len(available_slots) > 1 or \
795                 (len(available_slots) == 1 and "0" not in available_slots)
796         if not slotted:
797                 # check the vdb in case this is multislot
798                 available_slots = set(vardb.aux_get(cpv, ["SLOT"])[0] \
799                         for cpv in vardb.match(cp))
800                 slotted = len(available_slots) > 1 or \
801                         (len(available_slots) == 1 and "0" not in available_slots)
802         if slotted and arg_atom != cp:
803                 # If the user gave a specific atom, store it as a
804                 # slot atom in the world file.
805                 slot_atom = pkg.slot_atom
806
807                 # For USE=multislot, there are a couple of cases to
808                 # handle here:
809                 #
810                 # 1) SLOT="0", but the real SLOT spontaneously changed to some
811                 #    unknown value, so just record an unslotted atom.
812                 #
813                 # 2) SLOT comes from an installed package and there is no
814                 #    matching SLOT in the portage tree.
815                 #
816                 # Make sure that the slot atom is available in either the
817                 # portdb or the vardb, since otherwise the user certainly
818                 # doesn't want the SLOT atom recorded in the world file
819                 # (case 1 above).  If it's only available in the vardb,
820                 # the user may be trying to prevent a USE=multislot
821                 # package from being removed by --depclean (case 2 above).
822
823                 mydb = portdb
824                 if not portdb.match(slot_atom):
825                         # SLOT seems to come from an installed multislot package
826                         mydb = vardb
827                 # If there is no installed package matching the SLOT atom,
828                 # it probably changed SLOT spontaneously due to USE=multislot,
829                 # so just record an unslotted atom.
830                 if vardb.match(slot_atom):
831                         # Now verify that the argument is precise
832                         # enough to identify a specific slot.
833                         matches = mydb.match(arg_atom)
834                         matched_slots = set()
835                         for cpv in matches:
836                                 matched_slots.add(mydb.aux_get(cpv, ["SLOT"])[0])
837                         if len(matched_slots) == 1:
838                                 new_world_atom = slot_atom
839
840         if new_world_atom == sets["world"].findAtomForPackage(pkg):
841                 # Both atoms would be identical, so there's nothing to add.
842                 return None
843         if not slotted:
844                 # Unlike world atoms, system atoms are not greedy for slots, so they
845                 # can't be safely excluded from world if they are slotted.
846                 system_atom = sets["system"].findAtomForPackage(pkg)
847                 if system_atom:
848                         if not portage.dep_getkey(system_atom).startswith("virtual/"):
849                                 return None
850                         # System virtuals aren't safe to exclude from world since they can
851                         # match multiple old-style virtuals but only one of them will be
852                         # pulled in by update or depclean.
853                         providers = portdb.mysettings.getvirtuals().get(
854                                 portage.dep_getkey(system_atom))
855                         if providers and len(providers) == 1 and providers[0] == cp:
856                                 return None
857         return new_world_atom
858
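# Rough usage sketch (pkg, args_set, root_config and world_set are hypothetical
# names standing in for objects built by the depgraph machinery): the return
# value is either None (nothing new to record) or the atom to add to the world
# file, possibly narrowed to a slot atom such as "dev-lang/python:2.5".
#
#   atom = create_world_atom(pkg, args_set, root_config)
#   if atom is not None:
#           world_set.add(atom)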
859 def filter_iuse_defaults(iuse):
860         for flag in iuse:
861                 if flag.startswith("+") or flag.startswith("-"):
862                         yield flag[1:]
863                 else:
864                         yield flag
865
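# Example (illustrative): filter_iuse_defaults() strips the +/- default markers
# that may prefix IUSE entries.
#
#   list(filter_iuse_defaults(["+ssl", "-gtk", "doc"]))   # -> ['ssl', 'gtk', 'doc']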
866 class SlotObject(object):
867         __slots__ = ("__weakref__",)
868
869         def __init__(self, **kwargs):
870                 classes = [self.__class__]
871                 while classes:
872                         c = classes.pop()
873                         if c is SlotObject:
874                                 continue
875                         classes.extend(c.__bases__)
876                         slots = getattr(c, "__slots__", None)
877                         if not slots:
878                                 continue
879                         for myattr in slots:
880                                 myvalue = kwargs.get(myattr, None)
881                                 setattr(self, myattr, myvalue)
882
883         def copy(self):
884                 """
885                 Create a new instance and copy all attributes
886                 defined from __slots__ (including those from
887                 inherited classes).
888                 """
889                 obj = self.__class__()
890
891                 classes = [self.__class__]
892                 while classes:
893                         c = classes.pop()
894                         if c is SlotObject:
895                                 continue
896                         classes.extend(c.__bases__)
897                         slots = getattr(c, "__slots__", None)
898                         if not slots:
899                                 continue
900                         for myattr in slots:
901                                 setattr(obj, myattr, getattr(self, myattr))
902
903                 return obj
904
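# Minimal sketch (hypothetical subclass, for illustration only): SlotObject
# assigns every name declared in __slots__ from keyword arguments, defaulting
# missing attributes to None, and copy() duplicates those attributes.
#
#   class _Example(SlotObject):
#           __slots__ = ("name", "value")
#
#   obj = _Example(name="foo")      # obj.name == "foo", obj.value is None
#   dup = obj.copy()                # dup.name == "foo", dup.value is None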
905 class AbstractDepPriority(SlotObject):
906         __slots__ = ("buildtime", "runtime", "runtime_post")
907
908         def __lt__(self, other):
909                 return self.__int__() < other
910
911         def __le__(self, other):
912                 return self.__int__() <= other
913
914         def __eq__(self, other):
915                 return self.__int__() == other
916
917         def __ne__(self, other):
918                 return self.__int__() != other
919
920         def __gt__(self, other):
921                 return self.__int__() > other
922
923         def __ge__(self, other):
924                 return self.__int__() >= other
925
926         def copy(self):
927                 import copy
928                 return copy.copy(self)
929
930 class DepPriority(AbstractDepPriority):
931         """
932                 This class generates an integer priority level based on various
933                 attributes of the dependency relationship.  Attributes can be assigned
934                 at any time and the new integer value will be generated on calls to the
935                 __int__() method.  Rich comparison operators are supported.
936
937                 The boolean attributes that affect the integer value are "satisfied",
938                 "buildtime", "runtime", and "system".  Various combinations of
939                 attributes lead to the following priority levels:
940
941                 Combination of properties           Priority  Category
942
943                 not satisfied and buildtime            0       HARD
944                 not satisfied and runtime             -1       MEDIUM
945                 not satisfied and runtime_post        -2       MEDIUM_SOFT
946                 satisfied and buildtime and rebuild   -3       SOFT
947                 satisfied and buildtime               -4       SOFT
948                 satisfied and runtime                 -5       SOFT
949                 satisfied and runtime_post            -6       SOFT
950                 (none of the above)                   -6       SOFT
951
952                 Several integer constants are defined for categorization of priority
953                 levels:
954
955                 MEDIUM   The upper boundary for medium dependencies.
956                 MEDIUM_SOFT   The upper boundary for medium-soft dependencies.
957                 SOFT     The upper boundary for soft dependencies.
958                 MIN      The lower boundary for soft dependencies.
959         """
960         __slots__ = ("satisfied", "rebuild")
961         MEDIUM = -1
962         MEDIUM_SOFT = -2
963         SOFT   = -3
964         MIN    = -6
965
966         def __int__(self):
967                 if not self.satisfied:
968                         if self.buildtime:
969                                 return 0
970                         if self.runtime:
971                                 return -1
972                         if self.runtime_post:
973                                 return -2
974                 if self.buildtime:
975                         if self.rebuild:
976                                 return -3
977                         return -4
978                 if self.runtime:
979                         return -5
980                 if self.runtime_post:
981                         return -6
982                 return -6
983
984         def __str__(self):
985                 myvalue = self.__int__()
986                 if myvalue > self.MEDIUM:
987                         return "hard"
988                 if myvalue > self.MEDIUM_SOFT:
989                         return "medium"
990                 if myvalue > self.SOFT:
991                         return "medium-soft"
992                 return "soft"
993
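# Worked examples (illustrative, following the priority table in the docstring
# above):
#
#   int(DepPriority(buildtime=True))                  #  0 -> "hard"
#   int(DepPriority(runtime=True))                    # -1 -> "medium"
#   int(DepPriority(satisfied=True, runtime=True))    # -5 -> "soft"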
994 class BlockerDepPriority(DepPriority):
995         __slots__ = ()
996         def __int__(self):
997                 return 0
998
999 BlockerDepPriority.instance = BlockerDepPriority()
1000
1001 class UnmergeDepPriority(AbstractDepPriority):
1002         __slots__ = ("satisfied",)
1003         """
1004         Combination of properties           Priority  Category
1005
1006         runtime                                0       HARD
1007         runtime_post                          -1       HARD
1008         buildtime                             -2       SOFT
1009         (none of the above)                   -2       SOFT
1010         """
1011
1012         MAX    =  0
1013         SOFT   = -2
1014         MIN    = -2
1015
1016         def __int__(self):
1017                 if self.runtime:
1018                         return 0
1019                 if self.runtime_post:
1020                         return -1
1021                 if self.buildtime:
1022                         return -2
1023                 return -2
1024
1025         def __str__(self):
1026                 myvalue = self.__int__()
1027                 if myvalue > self.SOFT:
1028                         return "hard"
1029                 return "soft"
1030
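# Worked examples (illustrative, following the table embedded in the class
# above):
#
#   int(UnmergeDepPriority(runtime=True))      #  0 -> "hard"
#   int(UnmergeDepPriority(buildtime=True))    # -2 -> "soft"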
1031 class FakeVartree(portage.vartree):
1032         """This implements an in-memory copy of a vartree instance that provides
1033         all the interfaces required for use by the depgraph.  The vardb is locked
1034         during the constructor call just long enough to read a copy of the
1035         installed package information.  This allows the depgraph to do its
1036         dependency calculations without holding a lock on the vardb.  It also
1037         allows things like vardb global updates to be done in memory so that the
1038         user doesn't necessarily need write access to the vardb in cases where
1039         global updates are necessary (updates are performed when necessary if there
1040         is not a matching ebuild in the tree)."""
1041         def __init__(self, root_config, pkg_cache=None, acquire_lock=1):
1042                 self._root_config = root_config
1043                 if pkg_cache is None:
1044                         pkg_cache = {}
1045                 real_vartree = root_config.trees["vartree"]
1046                 portdb = root_config.trees["porttree"].dbapi
1047                 self.root = real_vartree.root
1048                 self.settings = real_vartree.settings
1049                 mykeys = list(real_vartree.dbapi._aux_cache_keys)
1050                 if "_mtime_" not in mykeys:
1051                         mykeys.append("_mtime_")
1052                 self._db_keys = mykeys
1053                 self._pkg_cache = pkg_cache
1054                 self.dbapi = PackageVirtualDbapi(real_vartree.settings)
1055                 vdb_path = os.path.join(self.root, portage.VDB_PATH)
1056                 try:
1057                         # At least the parent needs to exist for the lock file.
1058                         portage.util.ensure_dirs(vdb_path)
1059                 except portage.exception.PortageException:
1060                         pass
1061                 vdb_lock = None
1062                 try:
1063                         if acquire_lock and os.access(vdb_path, os.W_OK):
1064                                 vdb_lock = portage.locks.lockdir(vdb_path)
1065                         real_dbapi = real_vartree.dbapi
1066                         slot_counters = {}
1067                         for cpv in real_dbapi.cpv_all():
1068                                 cache_key = ("installed", self.root, cpv, "nomerge")
1069                                 pkg = self._pkg_cache.get(cache_key)
1070                                 if pkg is not None:
1071                                         metadata = pkg.metadata
1072                                 else:
1073                                         metadata = dict(izip(mykeys, real_dbapi.aux_get(cpv, mykeys)))
1074                                 myslot = metadata["SLOT"]
1075                                 mycp = portage.dep_getkey(cpv)
1076                                 myslot_atom = "%s:%s" % (mycp, myslot)
1077                                 try:
1078                                         mycounter = long(metadata["COUNTER"])
1079                                 except ValueError:
1080                                         mycounter = 0
1081                                         metadata["COUNTER"] = str(mycounter)
1082                                 other_counter = slot_counters.get(myslot_atom, None)
1083                                 if other_counter is not None:
1084                                         if other_counter > mycounter:
1085                                                 continue
1086                                 slot_counters[myslot_atom] = mycounter
1087                                 if pkg is None:
1088                                         pkg = Package(built=True, cpv=cpv,
1089                                                 installed=True, metadata=metadata,
1090                                                 root_config=root_config, type_name="installed")
1091                                 self._pkg_cache[pkg] = pkg
1092                                 self.dbapi.cpv_inject(pkg)
1093                         real_dbapi.flush_cache()
1094                 finally:
1095                         if vdb_lock:
1096                                 portage.locks.unlockdir(vdb_lock)
1097                 # Populate the old-style virtuals using the cached values.
1098                 if not self.settings.treeVirtuals:
1099                         self.settings.treeVirtuals = portage.util.map_dictlist_vals(
1100                                 portage.getCPFromCPV, self.get_all_provides())
1101
1102                 # Initialize variables needed for lazy cache pulls of the live ebuild
1103                 # metadata.  This ensures that the vardb lock is released ASAP, without
1104                 # being delayed in case cache generation is triggered.
1105                 self._aux_get = self.dbapi.aux_get
1106                 self.dbapi.aux_get = self._aux_get_wrapper
1107                 self._match = self.dbapi.match
1108                 self.dbapi.match = self._match_wrapper
1109                 self._aux_get_history = set()
1110                 self._portdb_keys = ["EAPI", "DEPEND", "RDEPEND", "PDEPEND"]
1111                 self._portdb = portdb
1112                 self._global_updates = None
1113
1114         def _match_wrapper(self, cpv, use_cache=1):
1115                 """
1116                 Make sure the metadata in Package instances gets updated for any
1117                 cpv that is returned from a match() call, since the metadata can
1118                 be accessed directly from the Package instance instead of via
1119                 aux_get().
1120                 """
1121                 matches = self._match(cpv, use_cache=use_cache)
1122                 for cpv in matches:
1123                         if cpv in self._aux_get_history:
1124                                 continue
1125                         self._aux_get_wrapper(cpv, [])
1126                 return matches
1127
1128         def _aux_get_wrapper(self, pkg, wants):
1129                 if pkg in self._aux_get_history:
1130                         return self._aux_get(pkg, wants)
1131                 self._aux_get_history.add(pkg)
1132                 try:
1133                         # Use the live ebuild metadata if possible.
1134                         live_metadata = dict(izip(self._portdb_keys,
1135                                 self._portdb.aux_get(pkg, self._portdb_keys)))
1136                         if not portage.eapi_is_supported(live_metadata["EAPI"]):
1137                                 raise KeyError(pkg)
1138                         self.dbapi.aux_update(pkg, live_metadata)
1139                 except (KeyError, portage.exception.PortageException):
1140                         if self._global_updates is None:
1141                                 self._global_updates = \
1142                                         grab_global_updates(self._portdb.porttree_root)
1143                         perform_global_updates(
1144                                 pkg, self.dbapi, self._global_updates)
1145                 return self._aux_get(pkg, wants)
1146
1147         def sync(self, acquire_lock=1):
1148                 """
1149                 Call this method to synchronize state with the real vardb
1150                 after one or more packages may have been installed or
1151                 uninstalled.
1152                 """
1153                 vdb_path = os.path.join(self.root, portage.VDB_PATH)
1154                 try:
1155                         # At least the parent needs to exist for the lock file.
1156                         portage.util.ensure_dirs(vdb_path)
1157                 except portage.exception.PortageException:
1158                         pass
1159                 vdb_lock = None
1160                 try:
1161                         if acquire_lock and os.access(vdb_path, os.W_OK):
1162                                 vdb_lock = portage.locks.lockdir(vdb_path)
1163                         self._sync()
1164                 finally:
1165                         if vdb_lock:
1166                                 portage.locks.unlockdir(vdb_lock)
1167
1168         def _sync(self):
1169
1170                 real_vardb = self._root_config.trees["vartree"].dbapi
1171                 current_cpv_set = frozenset(real_vardb.cpv_all())
1172                 pkg_vardb = self.dbapi
1173                 aux_get_history = self._aux_get_history
1174
1175                 # Remove any packages that have been uninstalled.
1176                 for pkg in list(pkg_vardb):
1177                         if pkg.cpv not in current_cpv_set:
1178                                 pkg_vardb.cpv_remove(pkg)
1179                                 aux_get_history.discard(pkg.cpv)
1180
1181                 # Validate counters and timestamps.
1182                 slot_counters = {}
1183                 root = self.root
1184                 validation_keys = ["COUNTER", "_mtime_"]
1185                 for cpv in current_cpv_set:
1186
1187                         pkg_hash_key = ("installed", root, cpv, "nomerge")
1188                         pkg = pkg_vardb.get(pkg_hash_key)
1189                         if pkg is not None:
1190                                 counter, mtime = real_vardb.aux_get(cpv, validation_keys)
1191                                 try:
1192                                         counter = long(counter)
1193                                 except ValueError:
1194                                         counter = 0
1195
1196                                 if counter != pkg.counter or \
1197                                         mtime != pkg.mtime:
1198                                         pkg_vardb.cpv_remove(pkg)
1199                                         aux_get_history.discard(pkg.cpv)
1200                                         pkg = None
1201
1202                         if pkg is None:
1203                                 pkg = self._pkg(cpv)
1204
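                        # If more than one installed package currently occupies the
                        # same slot (a transient state while one package replaces
                        # another), keep only the entry with the highest COUNTER.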
1205                         other_counter = slot_counters.get(pkg.slot_atom)
1206                         if other_counter is not None:
1207                                 if other_counter > pkg.counter:
1208                                         continue
1209
1210                         slot_counters[pkg.slot_atom] = pkg.counter
1211                         pkg_vardb.cpv_inject(pkg)
1212
1213                 real_vardb.flush_cache()
1214
1215         def _pkg(self, cpv):
1216                 root_config = self._root_config
1217                 real_vardb = root_config.trees["vartree"].dbapi
1218                 pkg = Package(cpv=cpv, installed=True,
1219                         metadata=izip(self._db_keys,
1220                         real_vardb.aux_get(cpv, self._db_keys)),
1221                         root_config=root_config,
1222                         type_name="installed")
1223
1224                 try:
1225                         mycounter = long(pkg.metadata["COUNTER"])
1226                 except ValueError:
1227                         mycounter = 0
1228                         pkg.metadata["COUNTER"] = str(mycounter)
1229
1230                 return pkg
1231
1232 def grab_global_updates(portdir):
1233         from portage.update import grab_updates, parse_updates
1234         updpath = os.path.join(portdir, "profiles", "updates")
1235         try:
1236                 rawupdates = grab_updates(updpath)
1237         except portage.exception.DirectoryNotFound:
1238                 rawupdates = []
1239         upd_commands = []
1240         for mykey, mystat, mycontent in rawupdates:
1241                 commands, errors = parse_updates(mycontent)
1242                 upd_commands.extend(commands)
1243         return upd_commands
1244
1245 def perform_global_updates(mycpv, mydb, mycommands):
1246         from portage.update import update_dbentries
1247         aux_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
1248         aux_dict = dict(izip(aux_keys, mydb.aux_get(mycpv, aux_keys)))
1249         updates = update_dbentries(mycommands, aux_dict)
1250         if updates:
1251                 mydb.aux_update(mycpv, updates)
1252
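# Illustrative sketch of how the two helpers above cooperate (the variable
# names here are hypothetical and do not appear elsewhere in this file):
#
#       upd_commands = grab_global_updates(portdb.porttree_root)
#       for cpv in vardb.cpv_all():
#               perform_global_updates(cpv, vardb, upd_commands)
#
# This re-applies profile update commands (package moves and slot moves) to
# the DEPEND/RDEPEND/PDEPEND values stored for each installed package.
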
1253 def visible(pkgsettings, pkg):
1254         """
1255         Check if a package is visible. This can raise an InvalidDependString
1256         exception if LICENSE is invalid.
1257         TODO: optionally generate a list of masking reasons
1258         @rtype: Boolean
1259         @returns: True if the package is visible, False otherwise.
1260         """
1261         if not pkg.metadata["SLOT"]:
1262                 return False
1263         if pkg.built and not pkg.installed and "CHOST" in pkg.metadata:
1264                 if not pkgsettings._accept_chost(pkg):
1265                         return False
1266         eapi = pkg.metadata["EAPI"]
1267         if not portage.eapi_is_supported(eapi):
1268                 return False
1269         if not pkg.installed:
1270                 if portage._eapi_is_deprecated(eapi):
1271                         return False
1272                 if pkgsettings._getMissingKeywords(pkg.cpv, pkg.metadata):
1273                         return False
1274         if pkgsettings._getMaskAtom(pkg.cpv, pkg.metadata):
1275                 return False
1276         if pkgsettings._getProfileMaskAtom(pkg.cpv, pkg.metadata):
1277                 return False
1278         try:
1279                 if pkgsettings._getMissingLicenses(pkg.cpv, pkg.metadata):
1280                         return False
1281         except portage.exception.InvalidDependString:
1282                 return False
1283         return True
1284
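# Illustrative usage sketch for visible() (hypothetical variables, not used
# elsewhere in this file):
#
#       pkgsettings = root_config.settings
#       if visible(pkgsettings, pkg):
#               ...  # pkg is unmasked for this root and may be considered
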
1285 def get_masking_status(pkg, pkgsettings, root_config):
1286
1287         mreasons = portage.getmaskingstatus(
1288                 pkg, settings=pkgsettings,
1289                 portdb=root_config.trees["porttree"].dbapi)
1290
1291         if pkg.built and not pkg.installed and "CHOST" in pkg.metadata:
1292                 if not pkgsettings._accept_chost(pkg):
1293                         mreasons.append("CHOST: %s" % \
1294                                 pkg.metadata["CHOST"])
1295
1296         if not pkg.metadata["SLOT"]:
1297                 mreasons.append("invalid: SLOT is undefined")
1298
1299         return mreasons
1300
1301 def get_mask_info(root_config, cpv, pkgsettings,
1302         db, pkg_type, built, installed, db_keys):
1303         eapi_masked = False
1304         try:
1305                 metadata = dict(izip(db_keys,
1306                         db.aux_get(cpv, db_keys)))
1307         except KeyError:
1308                 metadata = None
1309         if metadata and not built:
1310                 pkgsettings.setcpv(cpv, mydb=metadata)
1311                 metadata["USE"] = pkgsettings["PORTAGE_USE"]
1312         if metadata is None:
1313                 mreasons = ["corruption"]
1314         else:
1315                 pkg = Package(type_name=pkg_type, root_config=root_config,
1316                         cpv=cpv, built=built, installed=installed, metadata=metadata)
1317                 mreasons = get_masking_status(pkg, pkgsettings, root_config)
1318         return metadata, mreasons
1319
1320 def show_masked_packages(masked_packages):
1321         shown_licenses = set()
1322         shown_comments = set()
1323         # There may be both an ebuild and a binary package for the same
1324         # cpv. Only show one of them, to avoid redundant output.
1325         shown_cpvs = set()
1326         have_eapi_mask = False
1327         for (root_config, pkgsettings, cpv,
1328                 metadata, mreasons) in masked_packages:
1329                 if cpv in shown_cpvs:
1330                         continue
1331                 shown_cpvs.add(cpv)
1332                 comment, filename = None, None
1333                 if "package.mask" in mreasons:
1334                         comment, filename = \
1335                                 portage.getmaskingreason(
1336                                 cpv, metadata=metadata,
1337                                 settings=pkgsettings,
1338                                 portdb=root_config.trees["porttree"].dbapi,
1339                                 return_location=True)
1340                 missing_licenses = []
1341                 if metadata:
1342                         if not portage.eapi_is_supported(metadata["EAPI"]):
1343                                 have_eapi_mask = True
1344                         try:
1345                                 missing_licenses = \
1346                                         pkgsettings._getMissingLicenses(
1347                                                 cpv, metadata)
1348                         except portage.exception.InvalidDependString:
1349                                 # This will have already been reported
1350                                 # above via mreasons.
1351                                 pass
1352
1353                 print "- "+cpv+" (masked by: "+", ".join(mreasons)+")"
1354                 if comment and comment not in shown_comments:
1355                         print filename+":"
1356                         print comment
1357                         shown_comments.add(comment)
1358                 portdb = root_config.trees["porttree"].dbapi
1359                 for l in missing_licenses:
1360                         l_path = portdb.findLicensePath(l)
1361                         if l in shown_licenses:
1362                                 continue
1363                         msg = ("A copy of the '%s' license" + \
1364                         " is located at '%s'.") % (l, l_path)
1365                         print msg
1366                         print
1367                         shown_licenses.add(l)
1368         return have_eapi_mask
1369
1370 class Task(SlotObject):
1371         __slots__ = ("_hash_key", "_hash_value")
1372
1373         def _get_hash_key(self):
1374                 hash_key = getattr(self, "_hash_key", None)
1375                 if hash_key is None:
1376                         raise NotImplementedError(self)
1377                 return hash_key
1378
1379         def __eq__(self, other):
1380                 return self._get_hash_key() == other
1381
1382         def __ne__(self, other):
1383                 return self._get_hash_key() != other
1384
1385         def __hash__(self):
1386                 hash_value = getattr(self, "_hash_value", None)
1387                 if hash_value is None:
1388                         self._hash_value = hash(self._get_hash_key())
1389                 return self._hash_value
1390
1391         def __len__(self):
1392                 return len(self._get_hash_key())
1393
1394         def __getitem__(self, key):
1395                 return self._get_hash_key()[key]
1396
1397         def __iter__(self):
1398                 return iter(self._get_hash_key())
1399
1400         def __contains__(self, key):
1401                 return key in self._get_hash_key()
1402
1403         def __str__(self):
1404                 return str(self._get_hash_key())
1405
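# A Task behaves like its hash key tuple for hashing, equality, length,
# indexing and iteration.  Illustrative sketch (hypothetical subclass and
# key, not used elsewhere in this file):
#
#       class ExampleTask(Task):
#               __slots__ = ()
#               def _get_hash_key(self):
#                       hash_key = getattr(self, "_hash_key", None)
#                       if hash_key is None:
#                               self._hash_key = ("example", "/", "cat/pkg-1", "merge")
#                       return self._hash_key
#
#       ExampleTask() == ("example", "/", "cat/pkg-1", "merge")  # True
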
1406 class Blocker(Task):
1407
1408         __hash__ = Task.__hash__
1409         __slots__ = ("root", "atom", "cp", "eapi", "satisfied")
1410
1411         def __init__(self, **kwargs):
1412                 Task.__init__(self, **kwargs)
1413                 self.cp = portage.dep_getkey(self.atom)
1414
1415         def _get_hash_key(self):
1416                 hash_key = getattr(self, "_hash_key", None)
1417                 if hash_key is None:
1418                         self._hash_key = \
1419                                 ("blocks", self.root, self.atom, self.eapi)
1420                 return self._hash_key
1421
1422 class Package(Task):
1423
1424         __hash__ = Task.__hash__
1425         __slots__ = ("built", "cpv", "depth",
1426                 "installed", "metadata", "onlydeps", "operation",
1427                 "root_config", "type_name",
1428                 "category", "counter", "cp", "cpv_split",
1429                 "inherited", "iuse", "mtime",
1430                 "pf", "pv_split", "root", "slot", "slot_atom", "use")
1431
1432         metadata_keys = [
1433                 "CHOST", "COUNTER", "DEPEND", "EAPI",
1434                 "INHERITED", "IUSE", "KEYWORDS",
1435                 "LICENSE", "PDEPEND", "PROVIDE", "RDEPEND",
1436                 "repository", "PROPERTIES", "RESTRICT", "SLOT", "USE", "_mtime_"]
1437
1438         def __init__(self, **kwargs):
1439                 Task.__init__(self, **kwargs)
1440                 self.root = self.root_config.root
1441                 self.metadata = _PackageMetadataWrapper(self, self.metadata)
1442                 self.cp = portage.cpv_getkey(self.cpv)
1443                 self.slot_atom = portage.dep.Atom("%s:%s" % (self.cp, self.slot))
1444                 self.category, self.pf = portage.catsplit(self.cpv)
1445                 self.cpv_split = portage.catpkgsplit(self.cpv)
1446                 self.pv_split = self.cpv_split[1:]
1447
1448         class _use(object):
1449
1450                 __slots__ = ("__weakref__", "enabled")
1451
1452                 def __init__(self, use):
1453                         self.enabled = frozenset(use)
1454
1455         class _iuse(object):
1456
1457                 __slots__ = ("__weakref__", "all", "enabled", "disabled", "iuse_implicit", "regex", "tokens")
1458
1459                 def __init__(self, tokens, iuse_implicit):
1460                         self.tokens = tuple(tokens)
1461                         self.iuse_implicit = iuse_implicit
1462                         enabled = []
1463                         disabled = []
1464                         other = []
1465                         for x in tokens:
1466                                 prefix = x[:1]
1467                                 if prefix == "+":
1468                                         enabled.append(x[1:])
1469                                 elif prefix == "-":
1470                                         disabled.append(x[1:])
1471                                 else:
1472                                         other.append(x)
1473                         self.enabled = frozenset(enabled)
1474                         self.disabled = frozenset(disabled)
1475                         self.all = frozenset(chain(enabled, disabled, other))
1476
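                # The "regex" attribute is constructed lazily on first access:
                # it matches any explicit IUSE token plus the implicit IUSE
                # values, and the compiled pattern is cached on the instance.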
1477                 def __getattribute__(self, name):
1478                         if name == "regex":
1479                                 try:
1480                                         return object.__getattribute__(self, "regex")
1481                                 except AttributeError:
1482                                         all = object.__getattribute__(self, "all")
1483                                         iuse_implicit = object.__getattribute__(self, "iuse_implicit")
1484                                         # Escape anything except ".*" which is supposed
1485                                         # to pass through from _get_implicit_iuse()
1486                                         regex = (re.escape(x) for x in chain(all, iuse_implicit))
1487                                         regex = "^(%s)$" % "|".join(regex)
1488                                         regex = regex.replace("\\.\\*", ".*")
1489                                         self.regex = re.compile(regex)
1490                         return object.__getattribute__(self, name)
1491
1492         def _get_hash_key(self):
1493                 hash_key = getattr(self, "_hash_key", None)
1494                 if hash_key is None:
1495                         if self.operation is None:
1496                                 self.operation = "merge"
1497                                 if self.onlydeps or self.installed:
1498                                         self.operation = "nomerge"
1499                         self._hash_key = \
1500                                 (self.type_name, self.root, self.cpv, self.operation)
1501                 return self._hash_key
1502
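        # The rich comparisons below are only meaningful between packages that
        # share the same category/package name (cp); when the cp values differ
        # they simply return False instead of raising an exception.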
1503         def __lt__(self, other):
1504                 if other.cp != self.cp:
1505                         return False
1506                 if portage.pkgcmp(self.pv_split, other.pv_split) < 0:
1507                         return True
1508                 return False
1509
1510         def __le__(self, other):
1511                 if other.cp != self.cp:
1512                         return False
1513                 if portage.pkgcmp(self.pv_split, other.pv_split) <= 0:
1514                         return True
1515                 return False
1516
1517         def __gt__(self, other):
1518                 if other.cp != self.cp:
1519                         return False
1520                 if portage.pkgcmp(self.pv_split, other.pv_split) > 0:
1521                         return True
1522                 return False
1523
1524         def __ge__(self, other):
1525                 if other.cp != self.cp:
1526                         return False
1527                 if portage.pkgcmp(self.pv_split, other.pv_split) >= 0:
1528                         return True
1529                 return False
1530
1531 _all_metadata_keys = set(x for x in portage.auxdbkeys \
1532         if not x.startswith("UNUSED_"))
1533 _all_metadata_keys.discard("CDEPEND")
1534 _all_metadata_keys.update(Package.metadata_keys)
1535
1536 from portage.cache.mappings import slot_dict_class
1537 _PackageMetadataWrapperBase = slot_dict_class(_all_metadata_keys)
1538
1539 class _PackageMetadataWrapper(_PackageMetadataWrapperBase):
1540         """
1541         Detect metadata updates and synchronize Package attributes.
1542         """
1543
1544         __slots__ = ("_pkg",)
1545         _wrapped_keys = frozenset(
1546                 ["COUNTER", "INHERITED", "IUSE", "SLOT", "USE", "_mtime_"])
1547
1548         def __init__(self, pkg, metadata):
1549                 _PackageMetadataWrapperBase.__init__(self)
1550                 self._pkg = pkg
1551                 self.update(metadata)
1552
1553         def __setitem__(self, k, v):
1554                 _PackageMetadataWrapperBase.__setitem__(self, k, v)
1555                 if k in self._wrapped_keys:
1556                         getattr(self, "_set_" + k.lower())(k, v)
1557
1558         def _set_inherited(self, k, v):
1559                 if isinstance(v, basestring):
1560                         v = frozenset(v.split())
1561                 self._pkg.inherited = v
1562
1563         def _set_iuse(self, k, v):
1564                 self._pkg.iuse = self._pkg._iuse(
1565                         v.split(), self._pkg.root_config.iuse_implicit)
1566
1567         def _set_slot(self, k, v):
1568                 self._pkg.slot = v
1569
1570         def _set_use(self, k, v):
1571                 self._pkg.use = self._pkg._use(v.split())
1572
1573         def _set_counter(self, k, v):
1574                 if isinstance(v, basestring):
1575                         try:
1576                                 v = long(v.strip())
1577                         except ValueError:
1578                                 v = 0
1579                 self._pkg.counter = v
1580
1581         def _set__mtime_(self, k, v):
1582                 if isinstance(v, basestring):
1583                         try:
1584                                 v = long(v.strip())
1585                         except ValueError:
1586                                 v = 0
1587                 self._pkg.mtime = v
1588
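# Illustrative sketch of the wrapper above (hypothetical values, not used
# elsewhere in this file): assigning to a wrapped key keeps the owning
# Package's attributes synchronized.
#
#       pkg.metadata["USE"] = "foo bar"
#       "foo" in pkg.use.enabled        # True
#       pkg.metadata["COUNTER"] = "42"
#       pkg.counter                     # 42 (as a long)
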
1589 class EbuildFetchonly(SlotObject):
1590
1591         __slots__ = ("fetch_all", "pkg", "pretend", "settings")
1592
1593         def execute(self):
1594                 # Spawning pkg_nofetch requires PORTAGE_BUILDDIR, both for
1595                 # ensuring a sane $PWD (bug #239560) and for storing elog
1596                 # messages. Use a private temp directory, in order
1597                 # to avoid locking the main one.
1598                 settings = self.settings
1599                 global_tmpdir = settings["PORTAGE_TMPDIR"]
1600                 from tempfile import mkdtemp
1601                 try:
1602                         private_tmpdir = mkdtemp("", "._portage_fetch_.", global_tmpdir)
1603                 except OSError, e:
1604                         if e.errno != portage.exception.PermissionDenied.errno:
1605                                 raise
1606                         raise portage.exception.PermissionDenied(global_tmpdir)
1607                 settings["PORTAGE_TMPDIR"] = private_tmpdir
1608                 settings.backup_changes("PORTAGE_TMPDIR")
1609                 try:
1610                         retval = self._execute()
1611                 finally:
1612                         settings["PORTAGE_TMPDIR"] = global_tmpdir
1613                         settings.backup_changes("PORTAGE_TMPDIR")
1614                         shutil.rmtree(private_tmpdir)
1615                 return retval
1616
1617         def _execute(self):
1618                 settings = self.settings
1619                 pkg = self.pkg
1620                 root_config = pkg.root_config
1621                 portdb = root_config.trees["porttree"].dbapi
1622                 ebuild_path = portdb.findname(pkg.cpv)
1623                 settings.setcpv(pkg)
1624                 debug = settings.get("PORTAGE_DEBUG") == "1"
1625                 use_cache = 1 # always true
1626                 portage.doebuild_environment(ebuild_path, "fetch",
1627                         root_config.root, settings, debug, use_cache, portdb)
1628                 portage.prepare_build_dirs(self.pkg.root, self.settings, 0)
1629
1630                 retval = portage.doebuild(ebuild_path, "fetch",
1631                         self.settings["ROOT"], self.settings, debug=debug,
1632                         listonly=self.pretend, fetchonly=1, fetchall=self.fetch_all,
1633                         mydbapi=portdb, tree="porttree")
1634
1635                 if retval != os.EX_OK:
1636                         msg = "Fetch failed for '%s'" % (pkg.cpv,)
1637                         eerror(msg, phase="unpack", key=pkg.cpv)
1638
1639                 portage.elog.elog_process(self.pkg.cpv, self.settings)
1640                 return retval
1641
1642 class PollConstants(object):
1643
1644         """
1645         Provides POLL* constants that are equivalent to those from the
1646         select module, for use by PollSelectAdapter.
1647         """
1648
1649         names = ("POLLIN", "POLLPRI", "POLLOUT", "POLLERR", "POLLHUP", "POLLNVAL")
1650         v = 1
1651         for k in names:
1652                 locals()[k] = getattr(select, k, v)
1653                 v *= 2
1654         del k, v
1655
1656 class AsynchronousTask(SlotObject):
1657         """
1658         Subclasses override _wait() and _poll() so that calls
1659         to public methods can be wrapped for implementing
1660         hooks such as exit listener notification.
1661
1662         Subclasses should call self.wait() to notify exit listeners after
1663         the task is complete and self.returncode has been set.
1664         """
1665
1666         __slots__ = ("background", "cancelled", "returncode") + \
1667                 ("_exit_listeners", "_exit_listener_stack", "_start_listeners")
1668
1669         def start(self):
1670                 """
1671                 Start an asynchronous task and then return as soon as possible.
1672                 """
1673                 self._start()
1674                 self._start_hook()
1675
1676         def _start(self):
1677                 raise NotImplementedError(self)
1678
1679         def isAlive(self):
1680                 return self.returncode is None
1681
1682         def poll(self):
1683                 self._wait_hook()
1684                 return self._poll()
1685
1686         def _poll(self):
1687                 return self.returncode
1688
1689         def wait(self):
1690                 if self.returncode is None:
1691                         self._wait()
1692                 self._wait_hook()
1693                 return self.returncode
1694
1695         def _wait(self):
1696                 return self.returncode
1697
1698         def cancel(self):
1699                 self.cancelled = True
1700                 self.wait()
1701
1702         def addStartListener(self, f):
1703                 """
1704                 The function will be called with one argument, a reference to self.
1705                 """
1706                 if self._start_listeners is None:
1707                         self._start_listeners = []
1708                 self._start_listeners.append(f)
1709
1710         def removeStartListener(self, f):
1711                 if self._start_listeners is None:
1712                         return
1713                 self._start_listeners.remove(f)
1714
1715         def _start_hook(self):
1716                 if self._start_listeners is not None:
1717                         start_listeners = self._start_listeners
1718                         self._start_listeners = None
1719
1720                         for f in start_listeners:
1721                                 f(self)
1722
1723         def addExitListener(self, f):
1724                 """
1725                 The function will be called with one argument, a reference to self.
1726                 """
1727                 if self._exit_listeners is None:
1728                         self._exit_listeners = []
1729                 self._exit_listeners.append(f)
1730
1731         def removeExitListener(self, f):
1732                 if self._exit_listeners is None:
1733                         if self._exit_listener_stack is not None:
1734                                 self._exit_listener_stack.remove(f)
1735                         return
1736                 self._exit_listeners.remove(f)
1737
1738         def _wait_hook(self):
1739                 """
1740                 Call this method after the task completes, just before returning
1741                 the returncode from wait() or poll(). This hook is
1742                 used to trigger exit listeners when the returncode first
1743                 becomes available.
1744                 """
1745                 if self.returncode is not None and \
1746                         self._exit_listeners is not None:
1747
1748                         # This prevents recursion, in case one of the
1749                         # exit handlers triggers this method again by
1750                         # calling wait(). Use a stack that gives
1751                         # removeExitListener() an opportunity to consume
1752                         # listeners from the stack, before they can get
1753                         # called below. This is necessary because a call
1754                         # to one exit listener may result in a call to
1755                         # removeExitListener() for another listener on
1756                         # the stack. That listener needs to be removed
1757                         # from the stack since it would be inconsistent
1758                         # to call it after it has been passed into
1759                         # removeExitListener().
1760                         self._exit_listener_stack = self._exit_listeners
1761                         self._exit_listeners = None
1762
1763                         self._exit_listener_stack.reverse()
1764                         while self._exit_listener_stack:
1765                                 self._exit_listener_stack.pop()(self)
1766
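# Illustrative lifecycle sketch for AsynchronousTask subclasses (hypothetical
# names, not used elsewhere in this file):
#
#       task = SomeAsynchronousTaskSubclass(background=True)
#       task.addExitListener(lambda t: writemsg("done: %s\n" % (t.returncode,)))
#       task.start()
#       while task.isAlive():
#               task.poll()     # normally a scheduler loop drives this
#       task.wait()             # returns returncode and fires exit listeners
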
1767 class AbstractPollTask(AsynchronousTask):
1768
1769         __slots__ = ("scheduler",) + \
1770                 ("_registered",)
1771
1772         _bufsize = 4096
1773         _exceptional_events = PollConstants.POLLERR | PollConstants.POLLNVAL
1774         _registered_events = PollConstants.POLLIN | PollConstants.POLLHUP | \
1775                 _exceptional_events
1776
1777         def _unregister(self):
1778                 raise NotImplementedError(self)
1779
1780         def _unregister_if_appropriate(self, event):
1781                 if self._registered:
1782                         if event & self._exceptional_events:
1783                                 self._unregister()
1784                                 self.cancel()
1785                         elif event & PollConstants.POLLHUP:
1786                                 self._unregister()
1787                                 self.wait()
1788
1789 class PipeReader(AbstractPollTask):
1790
1791         """
1792         Reads output from one or more files and saves it in memory,
1793         for retrieval via the getvalue() method. This is driven by
1794         the scheduler's poll() loop, so it runs entirely within the
1795         current process.
1796         """
1797
1798         __slots__ = ("input_files",) + \
1799                 ("_read_data", "_reg_ids")
1800
1801         def _start(self):
1802                 self._reg_ids = set()
1803                 self._read_data = []
1804                 for k, f in self.input_files.iteritems():
1805                         fcntl.fcntl(f.fileno(), fcntl.F_SETFL,
1806                                 fcntl.fcntl(f.fileno(), fcntl.F_GETFL) | os.O_NONBLOCK)
1807                         self._reg_ids.add(self.scheduler.register(f.fileno(),
1808                                 self._registered_events, self._output_handler))
1809                 self._registered = True
1810
1811         def isAlive(self):
1812                 return self._registered
1813
1814         def cancel(self):
1815                 if self.returncode is None:
1816                         self.returncode = 1
1817                         self.cancelled = True
1818                 self.wait()
1819
1820         def _wait(self):
1821                 if self.returncode is not None:
1822                         return self.returncode
1823
1824                 if self._registered:
1825                         self.scheduler.schedule(self._reg_ids)
1826                         self._unregister()
1827
1828                 self.returncode = os.EX_OK
1829                 return self.returncode
1830
1831         def getvalue(self):
1832                 """Retrieve the entire contents"""
1833                 return "".join(self._read_data)
1834
1835         def close(self):
1836                 """Free the memory buffer."""
1837                 self._read_data = None
1838
1839         def _output_handler(self, fd, event):
1840
1841                 if event & PollConstants.POLLIN:
1842
1843                         for f in self.input_files.itervalues():
1844                                 if fd == f.fileno():
1845                                         break
1846
1847                         buf = array.array('B')
1848                         try:
1849                                 buf.fromfile(f, self._bufsize)
1850                         except EOFError:
1851                                 pass
1852
1853                         if buf:
1854                                 self._read_data.append(buf.tostring())
1855                         else:
1856                                 self._unregister()
1857                                 self.wait()
1858
1859                 self._unregister_if_appropriate(event)
1860                 return self._registered
1861
1862         def _unregister(self):
1863                 """
1864                 Unregister from the scheduler and close open files.
1865                 """
1866
1867                 self._registered = False
1868
1869                 if self._reg_ids is not None:
1870                         for reg_id in self._reg_ids:
1871                                 self.scheduler.unregister(reg_id)
1872                         self._reg_ids = None
1873
1874                 if self.input_files is not None:
1875                         for f in self.input_files.itervalues():
1876                                 f.close()
1877                         self.input_files = None
1878
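# Illustrative PipeReader usage sketch (hypothetical scheduler and pipe
# objects, not used elsewhere in this file):
#
#       master_fd, slave_fd = os.pipe()
#       reader = PipeReader(input_files={"pipe" : os.fdopen(master_fd, 'r')},
#               scheduler=scheduler)
#       reader.start()
#       ...                     # some writer feeds slave_fd, then closes it
#       reader.wait()
#       output = reader.getvalue()
#       reader.close()
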
1879 class CompositeTask(AsynchronousTask):
1880
1881         __slots__ = ("scheduler",) + ("_current_task",)
1882
1883         def isAlive(self):
1884                 return self._current_task is not None
1885
1886         def cancel(self):
1887                 self.cancelled = True
1888                 if self._current_task is not None:
1889                         self._current_task.cancel()
1890
1891         def _poll(self):
1892                 """
1893                 This does a loop calling self._current_task.poll()
1894                 repeatedly as long as the value of self._current_task
1895                 keeps changing. It calls poll() a maximum of one time
1896                 for a given self._current_task instance. This is useful
1897                 since calling poll() on a task can trigger advance to
1898                 the next task, which could eventually lead to the returncode
1899                 being set in cases when polling only a single task would
1900                 not have the same effect.
1901                 """
1902
1903                 prev = None
1904                 while True:
1905                         task = self._current_task
1906                         if task is None or task is prev:
1907                                 # don't poll the same task more than once
1908                                 break
1909                         task.poll()
1910                         prev = task
1911
1912                 return self.returncode
1913
1914         def _wait(self):
1915
1916                 prev = None
1917                 while True:
1918                         task = self._current_task
1919                         if task is None:
1920                                 # don't wait for the same task more than once
1921                                 break
1922                         if task is prev:
1923                                 # Before the task.wait() method returned, an exit
1924                                 # listener should have set self._current_task to either
1925                                 # a different task or None. Something is wrong.
1926                                 raise AssertionError("self._current_task has not " + \
1927                                         "changed since calling wait", self, task)
1928                         task.wait()
1929                         prev = task
1930
1931                 return self.returncode
1932
1933         def _assert_current(self, task):
1934                 """
1935                 Raises an AssertionError if the given task is not the
1936                 same one as self._current_task. This can be useful
1937                 for detecting bugs.
1938                 """
1939                 if task is not self._current_task:
1940                         raise AssertionError("Unrecognized task: %s" % (task,))
1941
1942         def _default_exit(self, task):
1943                 """
1944                 Calls _assert_current() on the given task and then sets the
1945                 composite returncode attribute if task.returncode != os.EX_OK.
1946                 If the task failed then self._current_task will be set to None.
1947                 Subclasses can use this as a generic task exit callback.
1948
1949                 @rtype: int
1950                 @returns: The task.returncode attribute.
1951                 """
1952                 self._assert_current(task)
1953                 if task.returncode != os.EX_OK:
1954                         self.returncode = task.returncode
1955                         self._current_task = None
1956                 return task.returncode
1957
1958         def _final_exit(self, task):
1959                 """
1960                 Assumes that task is the final task of this composite task.
1961                 Calls _default_exit(), sets self.returncode to the task's
1962                 returncode, and sets self._current_task to None.
1963                 """
1964                 self._default_exit(task)
1965                 self._current_task = None
1966                 self.returncode = task.returncode
1967                 return self.returncode
1968
1969         def _default_final_exit(self, task):
1970                 """
1971                 This calls _final_exit() and then wait().
1972
1973                 Subclasses can use this as a generic final task exit callback.
1974
1975                 """
1976                 self._final_exit(task)
1977                 return self.wait()
1978
1979         def _start_task(self, task, exit_handler):
1980                 """
1981                 Register exit handler for the given task, set it
1982                 as self._current_task, and call task.start().
1983
1984                 Subclasses can use this as a generic way to start
1985                 a task.
1986
1987                 """
1988                 task.addExitListener(exit_handler)
1989                 self._current_task = task
1990                 task.start()
1991
1992 class TaskSequence(CompositeTask):
1993         """
1994         A collection of tasks that executes sequentially. Each task
1995         must have an addExitListener() method that can be used as
1996         a means to trigger movement from one task to the next.
1997         """
1998
1999         __slots__ = ("_task_queue",)
2000
2001         def __init__(self, **kwargs):
2002                 AsynchronousTask.__init__(self, **kwargs)
2003                 self._task_queue = deque()
2004
2005         def add(self, task):
2006                 self._task_queue.append(task)
2007
2008         def _start(self):
2009                 self._start_next_task()
2010
2011         def cancel(self):
2012                 self._task_queue.clear()
2013                 CompositeTask.cancel(self)
2014
2015         def _start_next_task(self):
2016                 self._start_task(self._task_queue.popleft(),
2017                         self._task_exit_handler)
2018
2019         def _task_exit_handler(self, task):
2020                 if self._default_exit(task) != os.EX_OK:
2021                         self.wait()
2022                 elif self._task_queue:
2023                         self._start_next_task()
2024                 else:
2025                         self._final_exit(task)
2026                         self.wait()
2027
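# Illustrative TaskSequence usage sketch (hypothetical tasks and scheduler,
# not used elsewhere in this file):
#
#       seq = TaskSequence(scheduler=scheduler)
#       seq.add(first_task)
#       seq.add(second_task)
#       seq.start()     # second_task only starts if first_task exits with os.EX_OK
#       seq.wait()
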
2028 class SubProcess(AbstractPollTask):
2029
2030         __slots__ = ("pid",) + \
2031                 ("_files", "_reg_id")
2032
2033         # A file descriptor is required for the scheduler to monitor changes from
2034         # inside a poll() loop. When logging is not enabled, create a pipe just to
2035         # serve this purpose alone.
2036         _dummy_pipe_fd = 9
2037
2038         def _poll(self):
2039                 if self.returncode is not None:
2040                         return self.returncode
2041                 if self.pid is None:
2042                         return self.returncode
2043                 if self._registered:
2044                         return self.returncode
2045
2046                 try:
2047                         retval = os.waitpid(self.pid, os.WNOHANG)
2048                 except OSError, e:
2049                         if e.errno != errno.ECHILD:
2050                                 raise
2051                         del e
2052                         retval = (self.pid, 1)
2053
2054                 if retval == (0, 0):
2055                         return None
2056                 self._set_returncode(retval)
2057                 return self.returncode
2058
2059         def cancel(self):
2060                 if self.isAlive():
2061                         try:
2062                                 os.kill(self.pid, signal.SIGTERM)
2063                         except OSError, e:
2064                                 if e.errno != errno.ESRCH:
2065                                         raise
2066                                 del e
2067
2068                 self.cancelled = True
2069                 if self.pid is not None:
2070                         self.wait()
2071                 return self.returncode
2072
2073         def isAlive(self):
2074                 return self.pid is not None and \
2075                         self.returncode is None
2076
2077         def _wait(self):
2078
2079                 if self.returncode is not None:
2080                         return self.returncode
2081
2082                 if self._registered:
2083                         self.scheduler.schedule(self._reg_id)
2084                         self._unregister()
2085                         if self.returncode is not None:
2086                                 return self.returncode
2087
2088                 try:
2089                         wait_retval = os.waitpid(self.pid, 0)
2090                 except OSError, e:
2091                         if e.errno != errno.ECHILD:
2092                                 raise
2093                         del e
2094                         self._set_returncode((self.pid, 1))
2095                 else:
2096                         self._set_returncode(wait_retval)
2097
2098                 return self.returncode
2099
2100         def _unregister(self):
2101                 """
2102                 Unregister from the scheduler and close open files.
2103                 """
2104
2105                 self._registered = False
2106
2107                 if self._reg_id is not None:
2108                         self.scheduler.unregister(self._reg_id)
2109                         self._reg_id = None
2110
2111                 if self._files is not None:
2112                         for f in self._files.itervalues():
2113                                 f.close()
2114                         self._files = None
2115
2116         def _set_returncode(self, wait_retval):
2117
2118                 retval = wait_retval[1]
2119
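                # os.waitpid() packs the exit code into the high byte of the
                # status and the terminating signal (if any) into the low byte.
                # Normalize that into a single nonzero returncode: signal deaths
                # are shifted up into the high byte, plain exits are shifted
                # down to the bare exit code.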
2120                 if retval != os.EX_OK:
2121                         if retval & 0xff:
2122                                 retval = (retval & 0xff) << 8
2123                         else:
2124                                 retval = retval >> 8
2125
2126                 self.returncode = retval
2127
2128 class SpawnProcess(SubProcess):
2129
2130         """
2131         Constructor keyword args are passed into portage.process.spawn().
2132         The required "args" keyword argument will be passed as the first
2133         spawn() argument.
2134         """
2135
2136         _spawn_kwarg_names = ("env", "opt_name", "fd_pipes",
2137                 "uid", "gid", "groups", "umask", "logfile",
2138                 "path_lookup", "pre_exec")
2139
2140         __slots__ = ("args",) + \
2141                 _spawn_kwarg_names
2142
2143         _file_names = ("log", "process", "stdout")
2144         _files_dict = slot_dict_class(_file_names, prefix="")
2145
2146         def _start(self):
2147
2148                 if self.cancelled:
2149                         return
2150
2151                 if self.fd_pipes is None:
2152                         self.fd_pipes = {}
2153                 fd_pipes = self.fd_pipes
2154                 fd_pipes.setdefault(0, sys.stdin.fileno())
2155                 fd_pipes.setdefault(1, sys.stdout.fileno())
2156                 fd_pipes.setdefault(2, sys.stderr.fileno())
2157
2158                 # flush any pending output
2159                 for fd in fd_pipes.itervalues():
2160                         if fd == sys.stdout.fileno():
2161                                 sys.stdout.flush()
2162                         if fd == sys.stderr.fileno():
2163                                 sys.stderr.flush()
2164
2165                 logfile = self.logfile
2166                 self._files = self._files_dict()
2167                 files = self._files
2168
2169                 master_fd, slave_fd = self._pipe(fd_pipes)
2170                 fcntl.fcntl(master_fd, fcntl.F_SETFL,
2171                         fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
2172
2173                 null_input = None
2174                 fd_pipes_orig = fd_pipes.copy()
2175                 if self.background:
2176                         # TODO: Use job control functions like tcsetpgrp() to control
2177                         # access to stdin. Until then, use /dev/null so that any
2178                         # attempts to read from stdin will immediately return EOF
2179                         # instead of blocking indefinitely.
2180                         null_input = open('/dev/null', 'rb')
2181                         fd_pipes[0] = null_input.fileno()
2182                 else:
2183                         fd_pipes[0] = fd_pipes_orig[0]
2184
2185                 files.process = os.fdopen(master_fd, 'r')
2186                 if logfile is not None:
2187
2188                         fd_pipes[1] = slave_fd
2189                         fd_pipes[2] = slave_fd
2190
2191                         files.log = open(logfile, "a")
2192                         portage.util.apply_secpass_permissions(logfile,
2193                                 uid=portage.portage_uid, gid=portage.portage_gid,
2194                                 mode=0660)
2195
2196                         if not self.background:
2197                                 files.stdout = os.fdopen(os.dup(fd_pipes_orig[1]), 'w')
2198
2199                         output_handler = self._output_handler
2200
2201                 else:
2202
2203                         # Create a dummy pipe so the scheduler can monitor
2204                         # the process from inside a poll() loop.
2205                         fd_pipes[self._dummy_pipe_fd] = slave_fd
2206                         if self.background:
2207                                 fd_pipes[1] = slave_fd
2208                                 fd_pipes[2] = slave_fd
2209                         output_handler = self._dummy_handler
2210
2211                 kwargs = {}
2212                 for k in self._spawn_kwarg_names:
2213                         v = getattr(self, k)
2214                         if v is not None:
2215                                 kwargs[k] = v
2216
2217                 kwargs["fd_pipes"] = fd_pipes
2218                 kwargs["returnpid"] = True
2219                 kwargs.pop("logfile", None)
2220
2221                 self._reg_id = self.scheduler.register(files.process.fileno(),
2222                         self._registered_events, output_handler)
2223                 self._registered = True
2224
2225                 retval = self._spawn(self.args, **kwargs)
2226
2227                 os.close(slave_fd)
2228                 if null_input is not None:
2229                         null_input.close()
2230
2231                 if isinstance(retval, int):
2232                         # spawn failed
2233                         self._unregister()
2234                         self.returncode = retval
2235                         self.wait()
2236                         return
2237
2238                 self.pid = retval[0]
2239                 portage.process.spawned_pids.remove(self.pid)
2240
2241         def _pipe(self, fd_pipes):
2242                 """
2243                 @type fd_pipes: dict
2244                 @param fd_pipes: pipes from which to copy terminal size if desired.
2245                 """
2246                 return os.pipe()
2247
2248         def _spawn(self, args, **kwargs):
2249                 return portage.process.spawn(args, **kwargs)
2250
2251         def _output_handler(self, fd, event):
2252
2253                 if event & PollConstants.POLLIN:
2254
2255                         files = self._files
2256                         buf = array.array('B')
2257                         try:
2258                                 buf.fromfile(files.process, self._bufsize)
2259                         except EOFError:
2260                                 pass
2261
2262                         if buf:
2263                                 if not self.background:
2264                                         buf.tofile(files.stdout)
2265                                         files.stdout.flush()
2266                                 buf.tofile(files.log)
2267                                 files.log.flush()
2268                         else:
2269                                 self._unregister()
2270                                 self.wait()
2271
2272                 self._unregister_if_appropriate(event)
2273                 return self._registered
2274
2275         def _dummy_handler(self, fd, event):
2276                 """
2277                 This method is mainly interested in detecting EOF, since
2278                 the only purpose of the pipe is to allow the scheduler to
2279                 monitor the process from inside a poll() loop.
2280                 """
2281
2282                 if event & PollConstants.POLLIN:
2283
2284                         buf = array.array('B')
2285                         try:
2286                                 buf.fromfile(self._files.process, self._bufsize)
2287                         except EOFError:
2288                                 pass
2289
2290                         if buf:
2291                                 pass
2292                         else:
2293                                 self._unregister()
2294                                 self.wait()
2295
2296                 self._unregister_if_appropriate(event)
2297                 return self._registered
2298
2299 class MiscFunctionsProcess(SpawnProcess):
2300         """
2301         Spawns misc-functions.sh with an existing ebuild environment.
2302         """
2303
2304         __slots__ = ("commands", "phase", "pkg", "settings")
2305
2306         def _start(self):
2307                 settings = self.settings
2308                 settings.pop("EBUILD_PHASE", None)
2309                 portage_bin_path = settings["PORTAGE_BIN_PATH"]
2310                 misc_sh_binary = os.path.join(portage_bin_path,
2311                         os.path.basename(portage.const.MISC_SH_BINARY))
2312
2313                 self.args = [portage._shell_quote(misc_sh_binary)] + self.commands
2314                 self.logfile = settings.get("PORTAGE_LOG_FILE")
2315
2316                 portage._doebuild_exit_status_unlink(
2317                         settings.get("EBUILD_EXIT_STATUS_FILE"))
2318
2319                 SpawnProcess._start(self)
2320
2321         def _spawn(self, args, **kwargs):
2322                 settings = self.settings
2323                 debug = settings.get("PORTAGE_DEBUG") == "1"
2324                 return portage.spawn(" ".join(args), settings,
2325                         debug=debug, **kwargs)
2326
2327         def _set_returncode(self, wait_retval):
2328                 SpawnProcess._set_returncode(self, wait_retval)
2329                 self.returncode = portage._doebuild_exit_status_check_and_log(
2330                         self.settings, self.phase, self.returncode)
2331
2332 class EbuildFetcher(SpawnProcess):
2333
2334         __slots__ = ("config_pool", "fetchonly", "fetchall", "pkg", "prefetch") + \
2335                 ("_build_dir",)
2336
2337         def _start(self):
2338
2339                 root_config = self.pkg.root_config
2340                 portdb = root_config.trees["porttree"].dbapi
2341                 ebuild_path = portdb.findname(self.pkg.cpv)
2342                 settings = self.config_pool.allocate()
2343                 self._build_dir = EbuildBuildDir(pkg=self.pkg, settings=settings)
2344                 self._build_dir.lock()
2345                 self._build_dir.clean()
2346                 portage.prepare_build_dirs(self.pkg.root, self._build_dir.settings, 0)
2347                 if self.logfile is None:
2348                         self.logfile = settings.get("PORTAGE_LOG_FILE")
2349
2350                 phase = "fetch"
2351                 if self.fetchall:
2352                         phase = "fetchall"
2353
2354                 # If any incremental variables have been overridden
2355                 # via the environment, those values need to be passed
2356                 # along here so that they are correctly considered by
2357                 # the config instance in the subprocess.
2358                 fetch_env = os.environ.copy()
2359
2360                 fetch_env["PORTAGE_NICENESS"] = "0"
2361                 if self.prefetch:
2362                         fetch_env["PORTAGE_PARALLEL_FETCHONLY"] = "1"
2363
2364                 ebuild_binary = os.path.join(
2365                         settings["PORTAGE_BIN_PATH"], "ebuild")
2366
2367                 fetch_args = [ebuild_binary, ebuild_path, phase]
2368                 debug = settings.get("PORTAGE_DEBUG") == "1"
2369                 if debug:
2370                         fetch_args.append("--debug")
2371
2372                 self.args = fetch_args
2373                 self.env = fetch_env
2374                 SpawnProcess._start(self)
2375
2376         def _pipe(self, fd_pipes):
2377                 """When appropriate, use a pty so that fetcher progress bars,
2378                 like the ones wget displays, will work properly."""
2379                 if self.background or not sys.stdout.isatty():
2380                         # When the output only goes to a log file,
2381                         # there's no point in creating a pty.
2382                         return os.pipe()
2383                 stdout_pipe = fd_pipes.get(1)
2384                 got_pty, master_fd, slave_fd = \
2385                         portage._create_pty_or_pipe(copy_term_size=stdout_pipe)
2386                 return (master_fd, slave_fd)
2387
2388         def _set_returncode(self, wait_retval):
2389                 SpawnProcess._set_returncode(self, wait_retval)
2390                 # Collect elog messages that might have been
2391                 # created by the pkg_nofetch phase.
2392                 if self._build_dir is not None:
2393                         # Skip elog messages for prefetch, in order to avoid duplicates.
2394                         if not self.prefetch and self.returncode != os.EX_OK:
2395                                 elog_out = None
2396                                 if self.logfile is not None:
2397                                         if self.background:
2398                                                 elog_out = open(self.logfile, 'a')
2399                                 msg = "Fetch failed for '%s'" % (self.pkg.cpv,)
2400                                 if self.logfile is not None:
2401                                         msg += ", Log file:"
2402                                 eerror(msg, phase="unpack", key=self.pkg.cpv, out=elog_out)
2403                                 if self.logfile is not None:
2404                                         eerror(" '%s'" % (self.logfile,),
2405                                                 phase="unpack", key=self.pkg.cpv, out=elog_out)
2406                                 if elog_out is not None:
2407                                         elog_out.close()
2408                         if not self.prefetch:
2409                                 portage.elog.elog_process(self.pkg.cpv, self._build_dir.settings)
2410                         features = self._build_dir.settings.features
2411                         if self.returncode == os.EX_OK:
2412                                 self._build_dir.clean()
2413                         self._build_dir.unlock()
2414                         self.config_pool.deallocate(self._build_dir.settings)
2415                         self._build_dir = None
2416
2417 class EbuildBuildDir(SlotObject):
2418
2419         __slots__ = ("dir_path", "pkg", "settings",
2420                 "locked", "_catdir", "_lock_obj")
2421
2422         def __init__(self, **kwargs):
2423                 SlotObject.__init__(self, **kwargs)
2424                 self.locked = False
2425
2426         def lock(self):
2427                 """
2428                 This raises an AlreadyLocked exception if lock() is called
2429                 while a lock is already held. In order to avoid this, call
2430                 unlock() or check whether the "locked" attribute is True
2431                 or False before calling lock().
2432                 """
2433                 if self._lock_obj is not None:
2434                         raise self.AlreadyLocked((self._lock_obj,))
2435
2436                 dir_path = self.dir_path
2437                 if dir_path is None:
2438                         root_config = self.pkg.root_config
2439                         portdb = root_config.trees["porttree"].dbapi
2440                         ebuild_path = portdb.findname(self.pkg.cpv)
2441                         settings = self.settings
2442                         settings.setcpv(self.pkg)
2443                         debug = settings.get("PORTAGE_DEBUG") == "1"
2444                         use_cache = 1 # always true
2445                         portage.doebuild_environment(ebuild_path, "setup", root_config.root,
2446                                 self.settings, debug, use_cache, portdb)
2447                         dir_path = self.settings["PORTAGE_BUILDDIR"]
2448
2449                 catdir = os.path.dirname(dir_path)
2450                 self._catdir = catdir
2451
2452                 portage.util.ensure_dirs(os.path.dirname(catdir),
2453                         gid=portage.portage_gid,
2454                         mode=070, mask=0)
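                     # Hold a short-lived lock on the category directory while it is
                     # created and while the build dir lock below is acquired, so that
                     # concurrent builds in the same category do not race here.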
2455                 catdir_lock = None
2456                 try:
2457                         catdir_lock = portage.locks.lockdir(catdir)
2458                         portage.util.ensure_dirs(catdir,
2459                                 gid=portage.portage_gid,
2460                                 mode=070, mask=0)
2461                         self._lock_obj = portage.locks.lockdir(dir_path)
2462                 finally:
2463                         self.locked = self._lock_obj is not None
2464                         if catdir_lock is not None:
2465                                 portage.locks.unlockdir(catdir_lock)
2466
2467         def clean(self):
2468                 """Uses shutil.rmtree() rather than spawning a 'clean' phase. This is
2469                 disabled when keepwork or keeptemp is in FEATURES."""
2470                 settings = self.settings
2471                 features = settings.features
2472                 if not ("keepwork" in features or "keeptemp" in features):
2473                         try:
2474                                 shutil.rmtree(settings["PORTAGE_BUILDDIR"])
2475                         except EnvironmentError, e:
2476                                 if e.errno != errno.ENOENT:
2477                                         raise
2478                                 del e
2479
2480         def unlock(self):
2481                 if self._lock_obj is None:
2482                         return
2483
2484                 portage.locks.unlockdir(self._lock_obj)
2485                 self._lock_obj = None
2486                 self.locked = False
2487
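                     # Try to remove the category directory now that this build dir is
                     # unlocked; ENOTEMPTY/EEXIST simply mean it is still in use.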
2488                 catdir = self._catdir
2489                 catdir_lock = None
2490                 try:
2491                         catdir_lock = portage.locks.lockdir(catdir)
2492                 finally:
2493                         if catdir_lock:
2494                                 try:
2495                                         os.rmdir(catdir)
2496                                 except OSError, e:
2497                                         if e.errno not in (errno.ENOENT,
2498                                                 errno.ENOTEMPTY, errno.EEXIST):
2499                                                 raise
2500                                         del e
2501                                 portage.locks.unlockdir(catdir_lock)
2502
2503         class AlreadyLocked(portage.exception.PortageException):
2504                 pass
2505
2506 class EbuildBuild(CompositeTask):
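     """
     Builds a package from source: waits on any background prefetcher,
     fetches distfiles, runs the ebuild phases via EbuildExecuter and,
     when --buildpkg or buildsyspkg applies, creates a binary package
     before install() merges the result.
     """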
2507
2508         __slots__ = ("args_set", "config_pool", "find_blockers",
2509                 "ldpath_mtimes", "logger", "opts", "pkg", "pkg_count",
2510                 "prefetcher", "settings", "world_atom") + \
2511                 ("_build_dir", "_buildpkg", "_ebuild_path", "_issyspkg", "_tree")
2512
2513         def _start(self):
2514
2515                 logger = self.logger
2516                 opts = self.opts
2517                 pkg = self.pkg
2518                 settings = self.settings
2519                 world_atom = self.world_atom
2520                 root_config = pkg.root_config
2521                 tree = "porttree"
2522                 self._tree = tree
2523                 portdb = root_config.trees[tree].dbapi
2524                 settings.setcpv(pkg)
2525                 settings.configdict["pkg"]["EMERGE_FROM"] = pkg.type_name
2526                 ebuild_path = portdb.findname(self.pkg.cpv)
2527                 self._ebuild_path = ebuild_path
2528
2529                 prefetcher = self.prefetcher
2530                 if prefetcher is None:
2531                         pass
2532                 elif not prefetcher.isAlive():
2533                         prefetcher.cancel()
2534                 elif prefetcher.poll() is None:
2535
2536                         waiting_msg = "Fetching files " + \
2537                                 "in the background. " + \
2538                                 "To view fetch progress, run `tail -f " + \
2539                                 "/var/log/emerge-fetch.log` in another " + \
2540                                 "terminal."
2541                         msg_prefix = colorize("GOOD", " * ")
2542                         from textwrap import wrap
2543                         waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
2544                                 for line in wrap(waiting_msg, 65))
2545                         if not self.background:
2546                                 writemsg(waiting_msg, noiselevel=-1)
2547
2548                         self._current_task = prefetcher
2549                         prefetcher.addExitListener(self._prefetch_exit)
2550                         return
2551
2552                 self._prefetch_exit(prefetcher)
2553
2554         def _prefetch_exit(self, prefetcher):
2555
2556                 opts = self.opts
2557                 pkg = self.pkg
2558                 settings = self.settings
2559
2560                 if opts.fetchonly:
2561                         fetcher = EbuildFetchonly(
2562                                 fetch_all=opts.fetch_all_uri,
2563                                 pkg=pkg, pretend=opts.pretend,
2564                                 settings=settings)
2565                         retval = fetcher.execute()
2566                         self.returncode = retval
2567                         self.wait()
2568                         return
2569
2570                 fetcher = EbuildFetcher(config_pool=self.config_pool,
2571                         fetchall=opts.fetch_all_uri,
2572                         fetchonly=opts.fetchonly,
2573                         background=self.background,
2574                         pkg=pkg, scheduler=self.scheduler)
2575
2576                 self._start_task(fetcher, self._fetch_exit)
2577
2578         def _fetch_exit(self, fetcher):
2579                 opts = self.opts
2580                 pkg = self.pkg
2581
2582                 fetch_failed = False
2583                 if opts.fetchonly:
2584                         fetch_failed = self._final_exit(fetcher) != os.EX_OK
2585                 else:
2586                         fetch_failed = self._default_exit(fetcher) != os.EX_OK
2587
2588                 if fetch_failed and fetcher.logfile is not None and \
2589                         os.path.exists(fetcher.logfile):
2590                         self.settings["PORTAGE_LOG_FILE"] = fetcher.logfile
2591
2592                 if not fetch_failed and fetcher.logfile is not None:
2593                         # Fetch was successful, so remove the fetch log.
2594                         try:
2595                                 os.unlink(fetcher.logfile)
2596                         except OSError:
2597                                 pass
2598
2599                 if fetch_failed or opts.fetchonly:
2600                         self.wait()
2601                         return
2602
2603                 logger = self.logger
2604                 opts = self.opts
2605                 pkg_count = self.pkg_count
2606                 scheduler = self.scheduler
2607                 settings = self.settings
2608                 features = settings.features
2609                 ebuild_path = self._ebuild_path
2610                 system_set = pkg.root_config.sets["system"]
2611
2612                 self._build_dir = EbuildBuildDir(pkg=pkg, settings=settings)
2613                 self._build_dir.lock()
2614
2615                 # Cleaning is triggered before the setup
2616                 # phase, in portage.doebuild().
2617                 msg = " === (%s of %s) Cleaning (%s::%s)" % \
2618                         (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2619                 short_msg = "emerge: (%s of %s) %s Clean" % \
2620                         (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2621                 logger.log(msg, short_msg=short_msg)
2622
2623                 # buildsyspkg: Check if we need to _force_ binary package creation
2624                 self._issyspkg = "buildsyspkg" in features and \
2625                                 system_set.findAtomForPackage(pkg) and \
2626                                 not opts.buildpkg
2627
2628                 if opts.buildpkg or self._issyspkg:
2629
2630                         self._buildpkg = True
2631
2632                         msg = " === (%s of %s) Compiling/Packaging (%s::%s)" % \
2633                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2634                         short_msg = "emerge: (%s of %s) %s Compile" % \
2635                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2636                         logger.log(msg, short_msg=short_msg)
2637
2638                 else:
2639                         msg = " === (%s of %s) Compiling/Merging (%s::%s)" % \
2640                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2641                         short_msg = "emerge: (%s of %s) %s Compile" % \
2642                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2643                         logger.log(msg, short_msg=short_msg)
2644
2645                 build = EbuildExecuter(background=self.background, pkg=pkg,
2646                         scheduler=scheduler, settings=settings)
2647                 self._start_task(build, self._build_exit)
2648
2649         def _unlock_builddir(self):
2650                 portage.elog.elog_process(self.pkg.cpv, self.settings)
2651                 self._build_dir.unlock()
2652
2653         def _build_exit(self, build):
2654                 if self._default_exit(build) != os.EX_OK:
2655                         self._unlock_builddir()
2656                         self.wait()
2657                         return
2658
2659                 opts = self.opts
2660                 buildpkg = self._buildpkg
2661
2662                 if not buildpkg:
2663                         self._final_exit(build)
2664                         self.wait()
2665                         return
2666
2667                 if self._issyspkg:
2668                         msg = ">>> This is a system package, " + \
2669                                 "let's pack a rescue tarball.\n"
2670
2671                         log_path = self.settings.get("PORTAGE_LOG_FILE")
2672                         if log_path is not None:
2673                                 log_file = open(log_path, 'a')
2674                                 try:
2675                                         log_file.write(msg)
2676                                 finally:
2677                                         log_file.close()
2678
2679                         if not self.background:
2680                                 portage.writemsg_stdout(msg, noiselevel=-1)
2681
2682                 packager = EbuildBinpkg(background=self.background, pkg=self.pkg,
2683                         scheduler=self.scheduler, settings=self.settings)
2684
2685                 self._start_task(packager, self._buildpkg_exit)
2686
2687         def _buildpkg_exit(self, packager):
2688                 """
2689                 Release the build dir lock when there is a failure or
2690                 when in buildpkgonly mode. Otherwise, the lock will
2691                 be released when merge() is called.
2692                 """
2693
2694                 if self._default_exit(packager) != os.EX_OK:
2695                         self._unlock_builddir()
2696                         self.wait()
2697                         return
2698
2699                 if self.opts.buildpkgonly:
2700                         # Need to call "clean" phase for buildpkgonly mode
2701                         portage.elog.elog_process(self.pkg.cpv, self.settings)
2702                         phase = "clean"
2703                         clean_phase = EbuildPhase(background=self.background,
2704                                 pkg=self.pkg, phase=phase,
2705                                 scheduler=self.scheduler, settings=self.settings,
2706                                 tree=self._tree)
2707                         self._start_task(clean_phase, self._clean_exit)
2708                         return
2709
2710                 # Continue holding the builddir lock until
2711                 # after the package has been installed.
2712                 self._current_task = None
2713                 self.returncode = packager.returncode
2714                 self.wait()
2715
2716         def _clean_exit(self, clean_phase):
2717                 if self._final_exit(clean_phase) != os.EX_OK or \
2718                         self.opts.buildpkgonly:
2719                         self._unlock_builddir()
2720                 self.wait()
2721
2722         def install(self):
2723                 """
2724                 Install the package and then clean up and release locks.
2725                 Only call this after the build has completed successfully
2726                 and neither fetchonly nor buildpkgonly mode is enabled.
2727                 """
2728
2729                 find_blockers = self.find_blockers
2730                 ldpath_mtimes = self.ldpath_mtimes
2731                 logger = self.logger
2732                 pkg = self.pkg
2733                 pkg_count = self.pkg_count
2734                 settings = self.settings
2735                 world_atom = self.world_atom
2736                 ebuild_path = self._ebuild_path
2737                 tree = self._tree
2738
2739                 merge = EbuildMerge(find_blockers=self.find_blockers,
2740                         ldpath_mtimes=ldpath_mtimes, logger=logger, pkg=pkg,
2741                         pkg_count=pkg_count, pkg_path=ebuild_path,
2742                         scheduler=self.scheduler,
2743                         settings=settings, tree=tree, world_atom=world_atom)
2744
2745                 msg = " === (%s of %s) Merging (%s::%s)" % \
2746                         (pkg_count.curval, pkg_count.maxval,
2747                         pkg.cpv, ebuild_path)
2748                 short_msg = "emerge: (%s of %s) %s Merge" % \
2749                         (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2750                 logger.log(msg, short_msg=short_msg)
2751
2752                 try:
2753                         rval = merge.execute()
2754                 finally:
2755                         self._unlock_builddir()
2756
2757                 return rval
2758
2759 class EbuildExecuter(CompositeTask):
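     """
     Runs the clean, setup and unpack phases followed by the src_* phases
     appropriate for the package's EAPI; unpack is serialized for live
     ebuilds so they do not interfere with each other in DISTDIR.
     """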
2760
2761         __slots__ = ("pkg", "scheduler", "settings") + ("_tree",)
2762
2763         _phases = ("prepare", "configure", "compile", "test", "install")
2764
2765         _live_eclasses = frozenset([
2766                 "bzr",
2767                 "cvs",
2768                 "darcs",
2769                 "git",
2770                 "mercurial",
2771                 "subversion"
2772         ])
2773
2774         def _start(self):
2775                 self._tree = "porttree"
2776                 pkg = self.pkg
2777                 phase = "clean"
2778                 clean_phase = EbuildPhase(background=self.background, pkg=pkg, phase=phase,
2779                         scheduler=self.scheduler, settings=self.settings, tree=self._tree)
2780                 self._start_task(clean_phase, self._clean_phase_exit)
2781
2782         def _clean_phase_exit(self, clean_phase):
2783
2784                 if self._default_exit(clean_phase) != os.EX_OK:
2785                         self.wait()
2786                         return
2787
2788                 pkg = self.pkg
2789                 scheduler = self.scheduler
2790                 settings = self.settings
2791                 cleanup = 1
2792
2793                 # This initializes PORTAGE_LOG_FILE.
2794                 portage.prepare_build_dirs(pkg.root, settings, cleanup)
2795
2796                 setup_phase = EbuildPhase(background=self.background,
2797                         pkg=pkg, phase="setup", scheduler=scheduler,
2798                         settings=settings, tree=self._tree)
2799
2800                 setup_phase.addExitListener(self._setup_exit)
2801                 self._current_task = setup_phase
2802                 self.scheduler.scheduleSetup(setup_phase)
2803
2804         def _setup_exit(self, setup_phase):
2805
2806                 if self._default_exit(setup_phase) != os.EX_OK:
2807                         self.wait()
2808                         return
2809
2810                 unpack_phase = EbuildPhase(background=self.background,
2811                         pkg=self.pkg, phase="unpack", scheduler=self.scheduler,
2812                         settings=self.settings, tree=self._tree)
2813
2814                 if self._live_eclasses.intersection(self.pkg.inherited):
2815                         # Serialize $DISTDIR access for live ebuilds since
2816                         # otherwise they can interfere with each other.
2817
2818                         unpack_phase.addExitListener(self._unpack_exit)
2819                         self._current_task = unpack_phase
2820                         self.scheduler.scheduleUnpack(unpack_phase)
2821
2822                 else:
2823                         self._start_task(unpack_phase, self._unpack_exit)
2824
2825         def _unpack_exit(self, unpack_phase):
2826
2827                 if self._default_exit(unpack_phase) != os.EX_OK:
2828                         self.wait()
2829                         return
2830
2831                 ebuild_phases = TaskSequence(scheduler=self.scheduler)
2832
2833                 pkg = self.pkg
2834                 phases = self._phases
2835                 eapi = pkg.metadata["EAPI"]
2836                 if eapi in ("0", "1", "2_pre1"):
2837                         # skip src_prepare and src_configure
2838                         phases = phases[2:]
2839                 elif eapi in ("2_pre2",):
2840                         # skip src_prepare
2841                         phases = phases[1:]
2842
2843                 for phase in phases:
2844                         ebuild_phases.add(EbuildPhase(background=self.background,
2845                                 pkg=self.pkg, phase=phase, scheduler=self.scheduler,
2846                                 settings=self.settings, tree=self._tree))
2847
2848                 self._start_task(ebuild_phases, self._default_final_exit)
2849
2850 class EbuildMetadataPhase(SubProcess):
2851
2852         """
2853         Asynchronous interface for the ebuild "depend" phase which is
2854         used to extract metadata from the ebuild.
2855         """
2856
2857         __slots__ = ("cpv", "ebuild_path", "fd_pipes", "metadata_callback",
2858                 "ebuild_mtime", "portdb", "repo_path", "settings") + \
2859                 ("_raw_metadata",)
2860
2861         _file_names = ("ebuild",)
2862         _files_dict = slot_dict_class(_file_names, prefix="")
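     # The "depend" phase writes the raw metadata to this file descriptor;
     # it is read asynchronously by _output_handler() via the scheduler.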
2863         _metadata_fd = 9
2864
2865         def _start(self):
2866                 settings = self.settings
2867                 settings.reset()
2868                 ebuild_path = self.ebuild_path
2869                 debug = settings.get("PORTAGE_DEBUG") == "1"
2870                 master_fd = None
2871                 slave_fd = None
2872                 fd_pipes = None
2873                 if self.fd_pipes is not None:
2874                         fd_pipes = self.fd_pipes.copy()
2875                 else:
2876                         fd_pipes = {}
2877
2878                 fd_pipes.setdefault(0, sys.stdin.fileno())
2879                 fd_pipes.setdefault(1, sys.stdout.fileno())
2880                 fd_pipes.setdefault(2, sys.stderr.fileno())
2881
2882                 # flush any pending output
2883                 for fd in fd_pipes.itervalues():
2884                         if fd == sys.stdout.fileno():
2885                                 sys.stdout.flush()
2886                         if fd == sys.stderr.fileno():
2887                                 sys.stderr.flush()
2888
2889                 fd_pipes_orig = fd_pipes.copy()
2890                 self._files = self._files_dict()
2891                 files = self._files
2892
2893                 master_fd, slave_fd = os.pipe()
2894                 fcntl.fcntl(master_fd, fcntl.F_SETFL,
2895                         fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
2896
2897                 fd_pipes[self._metadata_fd] = slave_fd
2898
2899                 self._raw_metadata = []
2900                 files.ebuild = os.fdopen(master_fd, 'r')
2901                 self._reg_id = self.scheduler.register(files.ebuild.fileno(),
2902                         self._registered_events, self._output_handler)
2903                 self._registered = True
2904
2905                 retval = portage.doebuild(ebuild_path, "depend",
2906                         settings["ROOT"], settings, debug,
2907                         mydbapi=self.portdb, tree="porttree",
2908                         fd_pipes=fd_pipes, returnpid=True)
2909
2910                 os.close(slave_fd)
2911
2912                 if isinstance(retval, int):
2913                         # doebuild failed before spawning
2914                         self._unregister()
2915                         self.returncode = retval
2916                         self.wait()
2917                         return
2918
2919                 self.pid = retval[0]
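                     # The pid is waited on here (via SubProcess), so drop it from
                     # portage.process's global spawned_pids list.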
2920                 portage.process.spawned_pids.remove(self.pid)
2921
2922         def _output_handler(self, fd, event):
2923
2924                 if event & PollConstants.POLLIN:
2925                         self._raw_metadata.append(self._files.ebuild.read())
2926                         if not self._raw_metadata[-1]:
2927                                 self._unregister()
2928                                 self.wait()
2929
2930                 self._unregister_if_appropriate(event)
2931                 return self._registered
2932
2933         def _set_returncode(self, wait_retval):
2934                 SubProcess._set_returncode(self, wait_retval)
2935                 if self.returncode == os.EX_OK:
2936                         metadata_lines = "".join(self._raw_metadata).splitlines()
2937                         if len(portage.auxdbkeys) != len(metadata_lines):
2938                                 # Don't trust bash's returncode if the
2939                                 # number of lines is incorrect.
2940                                 self.returncode = 1
2941                         else:
2942                                 metadata = izip(portage.auxdbkeys, metadata_lines)
2943                                 self.metadata_callback(self.cpv, self.ebuild_path,
2944                                         self.repo_path, metadata, self.ebuild_mtime)
2945
2946 class EbuildProcess(SpawnProcess):
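     """
     Spawns a single ebuild phase via portage.doebuild() and logs its
     output to PORTAGE_LOG_FILE, except during the clean phases.
     """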
2947
2948         __slots__ = ("phase", "pkg", "settings", "tree")
2949
2950         def _start(self):
2951                 # Don't open the log file during the clean phase since the
2952                 # open file can result in an NFS lock on $T/build.log which
2953                 # prevents the clean phase from removing $T.
2954                 if self.phase not in ("clean", "cleanrm"):
2955                         self.logfile = self.settings.get("PORTAGE_LOG_FILE")
2956                 SpawnProcess._start(self)
2957
2958         def _pipe(self, fd_pipes):
2959                 stdout_pipe = fd_pipes.get(1)
2960                 got_pty, master_fd, slave_fd = \
2961                         portage._create_pty_or_pipe(copy_term_size=stdout_pipe)
2962                 return (master_fd, slave_fd)
2963
2964         def _spawn(self, args, **kwargs):
2965
2966                 root_config = self.pkg.root_config
2967                 tree = self.tree
2968                 mydbapi = root_config.trees[tree].dbapi
2969                 settings = self.settings
2970                 ebuild_path = settings["EBUILD"]
2971                 debug = settings.get("PORTAGE_DEBUG") == "1"
2972
2973                 rval = portage.doebuild(ebuild_path, self.phase,
2974                         root_config.root, settings, debug,
2975                         mydbapi=mydbapi, tree=tree, **kwargs)
2976
2977                 return rval
2978
2979         def _set_returncode(self, wait_retval):
2980                 SpawnProcess._set_returncode(self, wait_retval)
2981
2982                 if self.phase not in ("clean", "cleanrm"):
2983                         self.returncode = portage._doebuild_exit_status_check_and_log(
2984                                 self.settings, self.phase, self.returncode)
2985
2986                 if self.phase == "test" and self.returncode != os.EX_OK and \
2987                         "test-fail-continue" in self.settings.features:
2988                         self.returncode = os.EX_OK
2989
2990                 portage._post_phase_userpriv_perms(self.settings)
2991
2992 class EbuildPhase(CompositeTask):
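     """
     Runs one ebuild phase via EbuildProcess and then any associated
     post-phase commands via MiscFunctionsProcess; the install phase also
     triggers the build-log check and ownership fixups.
     """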
2993
2994         __slots__ = ("background", "pkg", "phase",
2995                 "scheduler", "settings", "tree")
2996
2997         _post_phase_cmds = portage._post_phase_cmds
2998
2999         def _start(self):
3000
3001                 ebuild_process = EbuildProcess(background=self.background,
3002                         pkg=self.pkg, phase=self.phase, scheduler=self.scheduler,
3003                         settings=self.settings, tree=self.tree)
3004
3005                 self._start_task(ebuild_process, self._ebuild_exit)
3006
3007         def _ebuild_exit(self, ebuild_process):
3008
3009                 if self.phase == "install":
3010                         out = None
3011                         log_path = self.settings.get("PORTAGE_LOG_FILE")
3012                         log_file = None
3013                         if self.background and log_path is not None:
3014                                 log_file = open(log_path, 'a')
3015                                 out = log_file
3016                         try:
3017                                 portage._check_build_log(self.settings, out=out)
3018                         finally:
3019                                 if log_file is not None:
3020                                         log_file.close()
3021
3022                 if self._default_exit(ebuild_process) != os.EX_OK:
3023                         self.wait()
3024                         return
3025
3026                 settings = self.settings
3027
3028                 if self.phase == "install":
3029                         portage._post_src_install_uid_fix(settings)
3030
3031                 post_phase_cmds = self._post_phase_cmds.get(self.phase)
3032                 if post_phase_cmds is not None:
3033                         post_phase = MiscFunctionsProcess(background=self.background,
3034                                 commands=post_phase_cmds, phase=self.phase, pkg=self.pkg,
3035                                 scheduler=self.scheduler, settings=settings)
3036                         self._start_task(post_phase, self._post_phase_exit)
3037                         return
3038
3039                 self.returncode = ebuild_process.returncode
3040                 self._current_task = None
3041                 self.wait()
3042
3043         def _post_phase_exit(self, post_phase):
3044                 if self._final_exit(post_phase) != os.EX_OK:
3045                         writemsg("!!! post %s failed; exiting.\n" % self.phase,
3046                                 noiselevel=-1)
3047                 self._current_task = None
3048                 self.wait()
3049                 return
3050
3051 class EbuildBinpkg(EbuildProcess):
3052         """
3053         This assumes that src_install() has successfully completed.
3054         """
3055         __slots__ = ("_binpkg_tmpfile",)
3056
3057         def _start(self):
3058                 self.phase = "package"
3059                 self.tree = "porttree"
3060                 pkg = self.pkg
3061                 root_config = pkg.root_config
3062                 portdb = root_config.trees["porttree"].dbapi
3063                 bintree = root_config.trees["bintree"]
3064                 ebuild_path = portdb.findname(self.pkg.cpv)
3065                 settings = self.settings
3066                 debug = settings.get("PORTAGE_DEBUG") == "1"
3067
3068                 bintree.prevent_collision(pkg.cpv)
3069                 binpkg_tmpfile = os.path.join(bintree.pkgdir,
3070                         pkg.cpv + ".tbz2." + str(os.getpid()))
3071                 self._binpkg_tmpfile = binpkg_tmpfile
3072                 settings["PORTAGE_BINPKG_TMPFILE"] = binpkg_tmpfile
3073                 settings.backup_changes("PORTAGE_BINPKG_TMPFILE")
3074
3075                 try:
3076                         EbuildProcess._start(self)
3077                 finally:
3078                         settings.pop("PORTAGE_BINPKG_TMPFILE", None)
3079
3080         def _set_returncode(self, wait_retval):
3081                 EbuildProcess._set_returncode(self, wait_retval)
3082
3083                 pkg = self.pkg
3084                 bintree = pkg.root_config.trees["bintree"]
3085                 binpkg_tmpfile = self._binpkg_tmpfile
3086                 if self.returncode == os.EX_OK:
3087                         bintree.inject(pkg.cpv, filename=binpkg_tmpfile)
3088
3089 class EbuildMerge(SlotObject):
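     """
     Synchronously merges the image directory into the live filesystem
     via portage.merge() and, on success, records the package with
     world_atom().
     """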
3090
3091         __slots__ = ("find_blockers", "logger", "ldpath_mtimes",
3092                 "pkg", "pkg_count", "pkg_path", "pretend",
3093                 "scheduler", "settings", "tree", "world_atom")
3094
3095         def execute(self):
3096                 root_config = self.pkg.root_config
3097                 settings = self.settings
3098                 retval = portage.merge(settings["CATEGORY"],
3099                         settings["PF"], settings["D"],
3100                         os.path.join(settings["PORTAGE_BUILDDIR"],
3101                         "build-info"), root_config.root, settings,
3102                         myebuild=settings["EBUILD"],
3103                         mytree=self.tree, mydbapi=root_config.trees[self.tree].dbapi,
3104                         vartree=root_config.trees["vartree"],
3105                         prev_mtimes=self.ldpath_mtimes,
3106                         scheduler=self.scheduler,
3107                         blockers=self.find_blockers)
3108
3109                 if retval == os.EX_OK:
3110                         self.world_atom(self.pkg)
3111                         self._log_success()
3112
3113                 return retval
3114
3115         def _log_success(self):
3116                 pkg = self.pkg
3117                 pkg_count = self.pkg_count
3118                 pkg_path = self.pkg_path
3119                 logger = self.logger
3120                 if "noclean" not in self.settings.features:
3121                         short_msg = "emerge: (%s of %s) %s Clean Post" % \
3122                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3123                         logger.log((" === (%s of %s) " + \
3124                                 "Post-Build Cleaning (%s::%s)") % \
3125                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path),
3126                                 short_msg=short_msg)
3127                 logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
3128                         (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
3129
3130 class PackageUninstall(AsynchronousTask):
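     """
     Unmerges an installed package via unmerge(), writing progress to the
     terminal or to PORTAGE_LOG_FILE when running in the background.
     """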
3131
3132         __slots__ = ("ldpath_mtimes", "opts", "pkg", "scheduler", "settings")
3133
3134         def _start(self):
3135                 try:
3136                         unmerge(self.pkg.root_config, self.opts, "unmerge",
3137                                 [self.pkg.cpv], self.ldpath_mtimes, clean_world=0,
3138                                 clean_delay=0, raise_on_error=1, scheduler=self.scheduler,
3139                                 writemsg_level=self._writemsg_level)
3140                 except UninstallFailure, e:
3141                         self.returncode = e.status
3142                 else:
3143                         self.returncode = os.EX_OK
3144                 self.wait()
3145
3146         def _writemsg_level(self, msg, level=0, noiselevel=0):
3147
3148                 log_path = self.settings.get("PORTAGE_LOG_FILE")
3149                 background = self.background
3150
3151                 if log_path is None:
3152                         if not (background and level < logging.WARNING):
3153                                 portage.util.writemsg_level(msg,
3154                                         level=level, noiselevel=noiselevel)
3155                 else:
3156                         if not background:
3157                                 portage.util.writemsg_level(msg,
3158                                         level=level, noiselevel=noiselevel)
3159
3160                         f = open(log_path, 'a')
3161                         try:
3162                                 f.write(msg)
3163                         finally:
3164                                 f.close()
3165
3166 class Binpkg(CompositeTask):
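     """
     Installs a binary package: optionally fetches it (--getbinpkg),
     verifies its digests, extracts the build-info metadata and the
     image, runs the setup phase and finally merges it via install().
     """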
3167
3168         __slots__ = ("find_blockers",
3169                 "ldpath_mtimes", "logger", "opts",
3170                 "pkg", "pkg_count", "prefetcher", "settings", "world_atom") + \
3171                 ("_bintree", "_build_dir", "_ebuild_path", "_fetched_pkg",
3172                 "_image_dir", "_infloc", "_pkg_path", "_tree", "_verify")
3173
3174         def _writemsg_level(self, msg, level=0, noiselevel=0):
3175
3176                 if not self.background:
3177                         portage.util.writemsg_level(msg,
3178                                 level=level, noiselevel=noiselevel)
3179
3180                 log_path = self.settings.get("PORTAGE_LOG_FILE")
3181                 if log_path is not None:
3182                         f = open(log_path, 'a')
3183                         try:
3184                                 f.write(msg)
3185                         finally:
3186                                 f.close()
3187
3188         def _start(self):
3189
3190                 pkg = self.pkg
3191                 settings = self.settings
3192                 settings.setcpv(pkg)
3193                 self._tree = "bintree"
3194                 self._bintree = self.pkg.root_config.trees[self._tree]
3195                 self._verify = not self.opts.pretend
3196
3197                 dir_path = os.path.join(settings["PORTAGE_TMPDIR"],
3198                         "portage", pkg.category, pkg.pf)
3199                 self._build_dir = EbuildBuildDir(dir_path=dir_path,
3200                         pkg=pkg, settings=settings)
3201                 self._image_dir = os.path.join(dir_path, "image")
3202                 self._infloc = os.path.join(dir_path, "build-info")
3203                 self._ebuild_path = os.path.join(self._infloc, pkg.pf + ".ebuild")
3204                 settings["EBUILD"] = self._ebuild_path
3205                 debug = settings.get("PORTAGE_DEBUG") == "1"
3206                 portage.doebuild_environment(self._ebuild_path, "setup",
3207                         settings["ROOT"], settings, debug, 1, self._bintree.dbapi)
3208                 settings.configdict["pkg"]["EMERGE_FROM"] = pkg.type_name
3209
3210                 # The prefetcher has already completed or it
3211                 # could be running now. If it's running now,
3212                 # wait for it to complete since it holds
3213                 # a lock on the file being fetched. The
3214                 # portage.locks functions are only designed
3215                 # to work between separate processes. Since
3216                 # the lock is held by the current process,
3217                 # use the scheduler and fetcher methods to
3218                 # synchronize with the fetcher.
3219                 prefetcher = self.prefetcher
3220                 if prefetcher is None:
3221                         pass
3222                 elif not prefetcher.isAlive():
3223                         prefetcher.cancel()
3224                 elif prefetcher.poll() is None:
3225
3226                         waiting_msg = ("Fetching '%s' " + \
3227                                 "in the background. " + \
3228                                 "To view fetch progress, run `tail -f " + \
3229                                 "/var/log/emerge-fetch.log` in another " + \
3230                                 "terminal.") % prefetcher.pkg_path
3231                         msg_prefix = colorize("GOOD", " * ")
3232                         from textwrap import wrap
3233                         waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
3234                                 for line in wrap(waiting_msg, 65))
3235                         if not self.background:
3236                                 writemsg(waiting_msg, noiselevel=-1)
3237
3238                         self._current_task = prefetcher
3239                         prefetcher.addExitListener(self._prefetch_exit)
3240                         return
3241
3242                 self._prefetch_exit(prefetcher)
3243
3244         def _prefetch_exit(self, prefetcher):
3245
3246                 pkg = self.pkg
3247                 pkg_count = self.pkg_count
3248                 if not (self.opts.pretend or self.opts.fetchonly):
3249                         self._build_dir.lock()
3250                         try:
3251                                 shutil.rmtree(self._build_dir.dir_path)
3252                         except EnvironmentError, e:
3253                                 if e.errno != errno.ENOENT:
3254                                         raise
3255                                 del e
3256                         portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
3257                 fetcher = BinpkgFetcher(background=self.background,
3258                         logfile=self.settings.get("PORTAGE_LOG_FILE"), pkg=self.pkg,
3259                         pretend=self.opts.pretend, scheduler=self.scheduler)
3260                 pkg_path = fetcher.pkg_path
3261                 self._pkg_path = pkg_path
3262
3263                 if self.opts.getbinpkg and self._bintree.isremote(pkg.cpv):
3264
3265                         msg = " --- (%s of %s) Fetching Binary (%s::%s)" %\
3266                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
3267                         short_msg = "emerge: (%s of %s) %s Fetch" % \
3268                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3269                         self.logger.log(msg, short_msg=short_msg)
3270                         self._start_task(fetcher, self._fetcher_exit)
3271                         return
3272
3273                 self._fetcher_exit(fetcher)
3274
3275         def _fetcher_exit(self, fetcher):
3276
3277                 # The fetcher only has a returncode when
3278                 # --getbinpkg is enabled.
3279                 if fetcher.returncode is not None:
3280                         self._fetched_pkg = True
3281                         if self._default_exit(fetcher) != os.EX_OK:
3282                                 self._unlock_builddir()
3283                                 self.wait()
3284                                 return
3285
3286                 if self.opts.pretend:
3287                         self._current_task = None
3288                         self.returncode = os.EX_OK
3289                         self.wait()
3290                         return
3291
3292                 verifier = None
3293                 if self._verify:
3294                         logfile = None
3295                         if self.background:
3296                                 logfile = self.settings.get("PORTAGE_LOG_FILE")
3297                         verifier = BinpkgVerifier(background=self.background,
3298                                 logfile=logfile, pkg=self.pkg)
3299                         self._start_task(verifier, self._verifier_exit)
3300                         return
3301
3302                 self._verifier_exit(verifier)
3303
3304         def _verifier_exit(self, verifier):
3305                 if verifier is not None and \
3306                         self._default_exit(verifier) != os.EX_OK:
3307                         self._unlock_builddir()
3308                         self.wait()
3309                         return
3310
3311                 logger = self.logger
3312                 pkg = self.pkg
3313                 pkg_count = self.pkg_count
3314                 pkg_path = self._pkg_path
3315
3316                 if self._fetched_pkg:
3317                         self._bintree.inject(pkg.cpv, filename=pkg_path)
3318
3319                 if self.opts.fetchonly:
3320                         self._current_task = None
3321                         self.returncode = os.EX_OK
3322                         self.wait()
3323                         return
3324
3325                 msg = " === (%s of %s) Merging Binary (%s::%s)" % \
3326                         (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
3327                 short_msg = "emerge: (%s of %s) %s Merge Binary" % \
3328                         (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3329                 logger.log(msg, short_msg=short_msg)
3330
3331                 phase = "clean"
3332                 settings = self.settings
3333                 ebuild_phase = EbuildPhase(background=self.background,
3334                         pkg=pkg, phase=phase, scheduler=self.scheduler,
3335                         settings=settings, tree=self._tree)
3336
3337                 self._start_task(ebuild_phase, self._clean_exit)
3338
3339         def _clean_exit(self, clean_phase):
3340                 if self._default_exit(clean_phase) != os.EX_OK:
3341                         self._unlock_builddir()
3342                         self.wait()
3343                         return
3344
3345                 dir_path = self._build_dir.dir_path
3346
3347                 try:
3348                         shutil.rmtree(dir_path)
3349                 except (IOError, OSError), e:
3350                         if e.errno != errno.ENOENT:
3351                                 raise
3352                         del e
3353
3354                 infloc = self._infloc
3355                 pkg = self.pkg
3356                 pkg_path = self._pkg_path
3357
3358                 dir_mode = 0755
3359                 for mydir in (dir_path, self._image_dir, infloc):
3360                         portage.util.ensure_dirs(mydir, uid=portage.data.portage_uid,
3361                                 gid=portage.data.portage_gid, mode=dir_mode)
3362
3363                 # This initializes PORTAGE_LOG_FILE.
3364                 portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
3365                 self._writemsg_level(">>> Extracting info\n")
3366
3367                 pkg_xpak = portage.xpak.tbz2(self._pkg_path)
3368                 check_missing_metadata = ("CATEGORY", "PF")
3369                 missing_metadata = set()
3370                 for k in check_missing_metadata:
3371                         v = pkg_xpak.getfile(k)
3372                         if not v:
3373                                 missing_metadata.add(k)
3374
3375                 pkg_xpak.unpackinfo(infloc)
3376                 for k in missing_metadata:
3377                         if k == "CATEGORY":
3378                                 v = pkg.category
3379                         elif k == "PF":
3380                                 v = pkg.pf
3381                         else:
3382                                 continue
3383
3384                         f = open(os.path.join(infloc, k), 'wb')
3385                         try:
3386                                 f.write(v + "\n")
3387                         finally:
3388                                 f.close()
3389
3390                 # Store the md5sum in the vdb.
3391                 f = open(os.path.join(infloc, "BINPKGMD5"), "w")
3392                 try:
3393                         f.write(str(portage.checksum.perform_md5(pkg_path)) + "\n")
3394                 finally:
3395                         f.close()
3396
3397                 # This gives bashrc users an opportunity to do various things
3398                 # such as remove binary packages after they're installed.
3399                 settings = self.settings
3400                 settings.setcpv(self.pkg)
3401                 settings["PORTAGE_BINPKG_FILE"] = pkg_path
3402                 settings.backup_changes("PORTAGE_BINPKG_FILE")
3403
3404                 phase = "setup"
3405                 setup_phase = EbuildPhase(background=self.background,
3406                         pkg=self.pkg, phase=phase, scheduler=self.scheduler,
3407                         settings=settings, tree=self._tree)
3408
3409                 setup_phase.addExitListener(self._setup_exit)
3410                 self._current_task = setup_phase
3411                 self.scheduler.scheduleSetup(setup_phase)
3412
3413         def _setup_exit(self, setup_phase):
3414                 if self._default_exit(setup_phase) != os.EX_OK:
3415                         self._unlock_builddir()
3416                         self.wait()
3417                         return
3418
3419                 extractor = BinpkgExtractorAsync(background=self.background,
3420                         image_dir=self._image_dir,
3421                         pkg=self.pkg, pkg_path=self._pkg_path, scheduler=self.scheduler)
3422                 self._writemsg_level(">>> Extracting %s\n" % self.pkg.cpv)
3423                 self._start_task(extractor, self._extractor_exit)
3424
3425         def _extractor_exit(self, extractor):
3426                 if self._final_exit(extractor) != os.EX_OK:
3427                         self._unlock_builddir()
3428                         writemsg("!!! Error Extracting '%s'\n" % self._pkg_path,
3429                                 noiselevel=-1)
3430                 self.wait()
3431
3432         def _unlock_builddir(self):
3433                 if self.opts.pretend or self.opts.fetchonly:
3434                         return
3435                 portage.elog.elog_process(self.pkg.cpv, self.settings)
3436                 self._build_dir.unlock()
3437
3438         def install(self):
3439
3440                 # This gives bashrc users an opportunity to do various things
3441                 # such as remove binary packages after they're installed.
3442                 settings = self.settings
3443                 settings["PORTAGE_BINPKG_FILE"] = self._pkg_path
3444                 settings.backup_changes("PORTAGE_BINPKG_FILE")
3445
3446                 merge = EbuildMerge(find_blockers=self.find_blockers,
3447                         ldpath_mtimes=self.ldpath_mtimes, logger=self.logger,
3448                         pkg=self.pkg, pkg_count=self.pkg_count,
3449                         pkg_path=self._pkg_path, scheduler=self.scheduler,
3450                         settings=settings, tree=self._tree, world_atom=self.world_atom)
3451
3452                 try:
3453                         retval = merge.execute()
3454                 finally:
3455                         settings.pop("PORTAGE_BINPKG_FILE", None)
3456                         self._unlock_builddir()
3457                 return retval
3458
3459 class BinpkgFetcher(SpawnProcess):
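     """
     Fetches a single binary package using FETCHCOMMAND or RESUMECOMMAND
     from the bintree settings, optionally taking a distlocks file lock
     on the destination path.
     """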
3460
3461         __slots__ = ("pkg", "pretend",
3462                 "locked", "pkg_path", "_lock_obj")
3463
3464         def __init__(self, **kwargs):
3465                 SpawnProcess.__init__(self, **kwargs)
3466                 pkg = self.pkg
3467                 self.pkg_path = pkg.root_config.trees["bintree"].getname(pkg.cpv)
3468
3469         def _start(self):
3470
3471                 if self.cancelled:
3472                         return
3473
3474                 pkg = self.pkg
3475                 pretend = self.pretend
3476                 bintree = pkg.root_config.trees["bintree"]
3477                 settings = bintree.settings
3478                 use_locks = "distlocks" in settings.features
3479                 pkg_path = self.pkg_path
3480
3481                 if not pretend:
3482                         portage.util.ensure_dirs(os.path.dirname(pkg_path))
3483                         if use_locks:
3484                                 self.lock()
3485                 exists = os.path.exists(pkg_path)
3486                 resume = exists and os.path.basename(pkg_path) in bintree.invalids
3487                 if not (pretend or resume):
3488                         # Remove existing file or broken symlink.
3489                         try:
3490                                 os.unlink(pkg_path)
3491                         except OSError:
3492                                 pass
3493
3494                 # urljoin doesn't work correctly with
3495                 # unrecognized protocols like sftp
3496                 if bintree._remote_has_index:
3497                         rel_uri = bintree._remotepkgs[pkg.cpv].get("PATH")
3498                         if not rel_uri:
3499                                 rel_uri = pkg.cpv + ".tbz2"
3500                         uri = bintree._remote_base_uri.rstrip("/") + \
3501                                 "/" + rel_uri.lstrip("/")
3502                 else:
3503                         uri = settings["PORTAGE_BINHOST"].rstrip("/") + \
3504                                 "/" + pkg.pf + ".tbz2"
3505
3506                 if pretend:
3507                         portage.writemsg_stdout("\n%s\n" % uri, noiselevel=-1)
3508                         self.returncode = os.EX_OK
3509                         self.wait()
3510                         return
3511
3512                 protocol = urlparse.urlparse(uri)[0]
3513                 fcmd_prefix = "FETCHCOMMAND"
3514                 if resume:
3515                         fcmd_prefix = "RESUMECOMMAND"
3516                 fcmd = settings.get(fcmd_prefix + "_" + protocol.upper())
3517                 if not fcmd:
3518                         fcmd = settings.get(fcmd_prefix)
3519
3520                 fcmd_vars = {
3521                         "DISTDIR" : os.path.dirname(pkg_path),
3522                         "URI"     : uri,
3523                         "FILE"    : os.path.basename(pkg_path)
3524                 }
3525
3526                 fetch_env = dict(settings.iteritems())
3527                 fetch_args = [portage.util.varexpand(x, mydict=fcmd_vars) \
3528                         for x in shlex.split(fcmd)]
3529
3530                 if self.fd_pipes is None:
3531                         self.fd_pipes = {}
3532                 fd_pipes = self.fd_pipes
3533
3534                 # Redirect all output to stdout since some fetchers like
3535                 # wget pollute stderr (if portage detects a problem then it
3536                 # can send its own message to stderr).
3537                 fd_pipes.setdefault(0, sys.stdin.fileno())
3538                 fd_pipes.setdefault(1, sys.stdout.fileno())
3539                 fd_pipes.setdefault(2, sys.stdout.fileno())
3540
3541                 self.args = fetch_args
3542                 self.env = fetch_env
3543                 SpawnProcess._start(self)
3544
3545         def _set_returncode(self, wait_retval):
3546                 SpawnProcess._set_returncode(self, wait_retval)
3547                 if self.returncode == os.EX_OK:
3548                         # If possible, update the mtime to match the remote package if
3549                         # the fetcher didn't already do it automatically.
3550                         bintree = self.pkg.root_config.trees["bintree"]
3551                         if bintree._remote_has_index:
3552                                 remote_mtime = bintree._remotepkgs[self.pkg.cpv].get("MTIME")
3553                                 if remote_mtime is not None:
3554                                         try:
3555                                                 remote_mtime = long(remote_mtime)
3556                                         except ValueError:
3557                                                 pass
3558                                         else:
3559                                                 try:
3560                                                         local_mtime = long(os.stat(self.pkg_path).st_mtime)
3561                                                 except OSError:
3562                                                         pass
3563                                                 else:
3564                                                         if remote_mtime != local_mtime:
3565                                                                 try:
3566                                                                         os.utime(self.pkg_path,
3567                                                                                 (remote_mtime, remote_mtime))
3568                                                                 except OSError:
3569                                                                         pass
3570
3571                 if self.locked:
3572                         self.unlock()
3573
3574         def lock(self):
3575                 """
3576                 This raises an AlreadyLocked exception if lock() is called
3577                 while a lock is already held. In order to avoid this, call
3578                 unlock() or check whether the "locked" attribute is True
3579                 or False before calling lock().
3580                 """
3581                 if self._lock_obj is not None:
3582                         raise self.AlreadyLocked((self._lock_obj,))
3583
3584                 self._lock_obj = portage.locks.lockfile(
3585                         self.pkg_path, wantnewlockfile=1)
3586                 self.locked = True
3587
3588         class AlreadyLocked(portage.exception.PortageException):
3589                 pass
3590
3591         def unlock(self):
3592                 if self._lock_obj is None:
3593                         return
3594                 portage.locks.unlockfile(self._lock_obj)
3595                 self._lock_obj = None
3596                 self.locked = False
3597
3598 class BinpkgVerifier(AsynchronousTask):
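     """
     Verifies the digests of a fetched binary package; on failure the
     file is renamed to a temporary name so that a later fetch can
     replace it.
     """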
3599         __slots__ = ("logfile", "pkg",)
3600
3601         def _start(self):
3602                 """
3603                 Note: Unlike a normal AsynchronousTask.start() method,
3604                 this one does all of its work synchronously. The returncode
3605                 attribute will be set before it returns.
3606                 """
3607
3608                 pkg = self.pkg
3609                 root_config = pkg.root_config
3610                 bintree = root_config.trees["bintree"]
3611                 rval = os.EX_OK
3612                 stdout_orig = sys.stdout
3613                 stderr_orig = sys.stderr
3614                 log_file = None
3615                 if self.background and self.logfile is not None:
3616                         log_file = open(self.logfile, 'a')
3617                 try:
3618                         if log_file is not None:
3619                                 sys.stdout = log_file
3620                                 sys.stderr = log_file
3621                         try:
3622                                 bintree.digestCheck(pkg)
3623                         except portage.exception.FileNotFound:
3624                                 writemsg("!!! Fetching Binary failed " + \
3625                                         "for '%s'\n" % pkg.cpv, noiselevel=-1)
3626                                 rval = 1
3627                         except portage.exception.DigestException, e:
3628                                 writemsg("\n!!! Digest verification failed:\n",
3629                                         noiselevel=-1)
3630                                 writemsg("!!! %s\n" % e.value[0],
3631                                         noiselevel=-1)
3632                                 writemsg("!!! Reason: %s\n" % e.value[1],
3633                                         noiselevel=-1)
3634                                 writemsg("!!! Got: %s\n" % e.value[2],
3635                                         noiselevel=-1)
3636                                 writemsg("!!! Expected: %s\n" % e.value[3],
3637                                         noiselevel=-1)
3638                                 rval = 1
3639                         if rval != os.EX_OK:
3640                                 pkg_path = bintree.getname(pkg.cpv)
3641                                 head, tail = os.path.split(pkg_path)
3642                                 temp_filename = portage._checksum_failure_temp_file(head, tail)
3643                                 writemsg("File renamed to '%s'\n" % (temp_filename,),
3644                                         noiselevel=-1)
3645                 finally:
3646                         sys.stdout = stdout_orig
3647                         sys.stderr = stderr_orig
3648                         if log_file is not None:
3649                                 log_file.close()
3650
3651                 self.returncode = rval
3652                 self.wait()
3653
3654 class BinpkgPrefetcher(CompositeTask):
3655
3656         __slots__ = ("pkg",) + \
3657                 ("pkg_path", "_bintree",)
3658
3659         def _start(self):
3660                 self._bintree = self.pkg.root_config.trees["bintree"]
3661                 fetcher = BinpkgFetcher(background=self.background,
3662                         logfile=self.scheduler.fetch.log_file, pkg=self.pkg,
3663                         scheduler=self.scheduler)
3664                 self.pkg_path = fetcher.pkg_path
3665                 self._start_task(fetcher, self._fetcher_exit)
3666
3667         def _fetcher_exit(self, fetcher):
3668
3669                 if self._default_exit(fetcher) != os.EX_OK:
3670                         self.wait()
3671                         return
3672
3673                 verifier = BinpkgVerifier(background=self.background,
3674                         logfile=self.scheduler.fetch.log_file, pkg=self.pkg)
3675                 self._start_task(verifier, self._verifier_exit)
3676
3677         def _verifier_exit(self, verifier):
3678                 if self._default_exit(verifier) != os.EX_OK:
3679                         self.wait()
3680                         return
3681
3682                 self._bintree.inject(self.pkg.cpv, filename=self.pkg_path)
3683
3684                 self._current_task = None
3685                 self.returncode = os.EX_OK
3686                 self.wait()
3687
3688 class BinpkgExtractorAsync(SpawnProcess):
3689
3690         __slots__ = ("image_dir", "pkg", "pkg_path")
3691
3692         _shell_binary = portage.const.BASH_BINARY
3693
3694         def _start(self):
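                # The spawned command is a bzip2/tar pipeline; with illustrative
                # (hypothetical) paths it expands to roughly:
                #   bash -c 'bzip2 -dqc -- /path/to/pkg.tbz2 | tar -xp -C /image/dir -f -'
                # i.e. decompress the binary package and unpack it into the
                # image directory, preserving permissions.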
3695                 self.args = [self._shell_binary, "-c",
3696                         "bzip2 -dqc -- %s | tar -xp -C %s -f -" % \
3697                         (portage._shell_quote(self.pkg_path),
3698                         portage._shell_quote(self.image_dir))]
3699
3700                 self.env = self.pkg.root_config.settings.environ()
3701                 SpawnProcess._start(self)
3702
3703 class MergeListItem(CompositeTask):
3704
3705         """
3706         TODO: For parallel scheduling, everything here needs asynchronous
3707         execution support (start, poll, and wait methods).
3708         """
3709
3710         __slots__ = ("args_set",
3711                 "binpkg_opts", "build_opts", "config_pool", "emerge_opts",
3712                 "find_blockers", "logger", "mtimedb", "pkg",
3713                 "pkg_count", "pkg_to_replace", "prefetcher",
3714                 "settings", "statusMessage", "world_atom") + \
3715                 ("_install_task",)
3716
3717         def _start(self):
3718
3719                 pkg = self.pkg
3720                 build_opts = self.build_opts
3721
3722                 if pkg.installed:
3723                         # uninstall, executed by self.merge()
3724                         self.returncode = os.EX_OK
3725                         self.wait()
3726                         return
3727
3728                 args_set = self.args_set
3729                 find_blockers = self.find_blockers
3730                 logger = self.logger
3731                 mtimedb = self.mtimedb
3732                 pkg_count = self.pkg_count
3733                 scheduler = self.scheduler
3734                 settings = self.settings
3735                 world_atom = self.world_atom
3736                 ldpath_mtimes = mtimedb["ldpath"]
3737
3738                 action_desc = "Emerging"
3739                 preposition = "for"
3740                 if pkg.type_name == "binary":
3741                         action_desc += " binary"
3742
3743                 if build_opts.fetchonly:
3744                         action_desc = "Fetching"
3745
3746                 msg = "%s (%s of %s) %s" % \
3747                         (action_desc,
3748                         colorize("MERGE_LIST_PROGRESS", str(pkg_count.curval)),
3749                         colorize("MERGE_LIST_PROGRESS", str(pkg_count.maxval)),
3750                         colorize("GOOD", pkg.cpv))
3751
3752                 portdb = pkg.root_config.trees["porttree"].dbapi
3753                 portdir_repo_name = portdb._repository_map.get(portdb.porttree_root)
3754                 if portdir_repo_name:
3755                         pkg_repo_name = pkg.metadata.get("repository")
3756                         if pkg_repo_name != portdir_repo_name:
3757                                 if not pkg_repo_name:
3758                                         pkg_repo_name = "unknown repo"
3759                                 msg += " from %s" % pkg_repo_name
3760
3761                 if pkg.root != "/":
3762                         msg += " %s %s" % (preposition, pkg.root)
3763
3764                 if not build_opts.pretend:
3765                         self.statusMessage(msg)
3766                         logger.log(" >>> emerge (%s of %s) %s to %s" % \
3767                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
3768
3769                 if pkg.type_name == "ebuild":
3770
3771                         build = EbuildBuild(args_set=args_set,
3772                                 background=self.background,
3773                                 config_pool=self.config_pool,
3774                                 find_blockers=find_blockers,
3775                                 ldpath_mtimes=ldpath_mtimes, logger=logger,
3776                                 opts=build_opts, pkg=pkg, pkg_count=pkg_count,
3777                                 prefetcher=self.prefetcher, scheduler=scheduler,
3778                                 settings=settings, world_atom=world_atom)
3779
3780                         self._install_task = build
3781                         self._start_task(build, self._default_final_exit)
3782                         return
3783
3784                 elif pkg.type_name == "binary":
3785
3786                         binpkg = Binpkg(background=self.background,
3787                                 find_blockers=find_blockers,
3788                                 ldpath_mtimes=ldpath_mtimes, logger=logger,
3789                                 opts=self.binpkg_opts, pkg=pkg, pkg_count=pkg_count,
3790                                 prefetcher=self.prefetcher, settings=settings,
3791                                 scheduler=scheduler, world_atom=world_atom)
3792
3793                         self._install_task = binpkg
3794                         self._start_task(binpkg, self._default_final_exit)
3795                         return
3796
3797         def _poll(self):
3798                 self._install_task.poll()
3799                 return self.returncode
3800
3801         def _wait(self):
3802                 self._install_task.wait()
3803                 return self.returncode
3804
3805         def merge(self):
3806
3807                 pkg = self.pkg
3808                 build_opts = self.build_opts
3809                 find_blockers = self.find_blockers
3810                 logger = self.logger
3811                 mtimedb = self.mtimedb
3812                 pkg_count = self.pkg_count
3813                 prefetcher = self.prefetcher
3814                 scheduler = self.scheduler
3815                 settings = self.settings
3816                 world_atom = self.world_atom
3817                 ldpath_mtimes = mtimedb["ldpath"]
3818
3819                 if pkg.installed:
3820                         if not (build_opts.buildpkgonly or \
3821                                 build_opts.fetchonly or build_opts.pretend):
3822
3823                                 uninstall = PackageUninstall(background=self.background,
3824                                         ldpath_mtimes=ldpath_mtimes, opts=self.emerge_opts,
3825                                         pkg=pkg, scheduler=scheduler, settings=settings)
3826
3827                                 uninstall.start()
3828                                 retval = uninstall.wait()
3829                                 if retval != os.EX_OK:
3830                                         return retval
3831                         return os.EX_OK
3832
3833                 if build_opts.fetchonly or \
3834                         build_opts.buildpkgonly:
3835                         return self.returncode
3836
3837                 retval = self._install_task.install()
3838                 return retval
3839
3840 class PackageMerge(AsynchronousTask):
3841         """
3842         TODO: Implement asynchronous merge so that the scheduler can
3843         run while a merge is executing.
3844         """
3845
3846         __slots__ = ("merge",)
3847
3848         def _start(self):
3849
3850                 pkg = self.merge.pkg
3851                 pkg_count = self.merge.pkg_count
3852
3853                 if pkg.installed:
3854                         action_desc = "Uninstalling"
3855                         preposition = "from"
3856                 else:
3857                         action_desc = "Installing"
3858                         preposition = "to"
3859
3860                 msg = "%s %s" % (action_desc, colorize("GOOD", pkg.cpv))
3861
3862                 if pkg.root != "/":
3863                         msg += " %s %s" % (preposition, pkg.root)
3864
3865                 if not self.merge.build_opts.fetchonly and \
3866                         not self.merge.build_opts.pretend and \
3867                         not self.merge.build_opts.buildpkgonly:
3868                         self.merge.statusMessage(msg)
3869
3870                 self.returncode = self.merge.merge()
3871                 self.wait()
3872
3873 class DependencyArg(object):
3874         def __init__(self, arg=None, root_config=None):
3875                 self.arg = arg
3876                 self.root_config = root_config
3877
3878         def __str__(self):
3879                 return str(self.arg)
3880
3881 class AtomArg(DependencyArg):
3882         def __init__(self, atom=None, **kwargs):
3883                 DependencyArg.__init__(self, **kwargs)
3884                 self.atom = atom
3885                 if not isinstance(self.atom, portage.dep.Atom):
3886                         self.atom = portage.dep.Atom(self.atom)
3887                 self.set = (self.atom, )
3888
3889 class PackageArg(DependencyArg):
3890         def __init__(self, package=None, **kwargs):
3891                 DependencyArg.__init__(self, **kwargs)
3892                 self.package = package
3893                 self.atom = portage.dep.Atom("=" + package.cpv)
3894                 self.set = (self.atom, )
3895
3896 class SetArg(DependencyArg):
3897         def __init__(self, set=None, **kwargs):
3898                 DependencyArg.__init__(self, **kwargs)
3899                 self.set = set
3900                 self.name = self.arg[len(SETPREFIX):]
3901
3902 class Dependency(SlotObject):
3903         __slots__ = ("atom", "blocker", "depth",
3904                 "parent", "onlydeps", "priority", "root")
3905         def __init__(self, **kwargs):
3906                 SlotObject.__init__(self, **kwargs)
3907                 if self.priority is None:
3908                         self.priority = DepPriority()
3909                 if self.depth is None:
3910                         self.depth = 0
3911
3912 class BlockerCache(DictMixin):
3913         """This caches blockers of installed packages so that dep_check does not
3914         have to be done for every single installed package on every invocation of
3915         emerge.  The cache is invalidated whenever it is detected that something
3916         has changed that might alter the results of dep_check() calls:
3917                 1) the set of installed packages (including COUNTER) has changed
3918                 2) the old-style virtuals have changed
3919         """
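        # Usage sketch (illustrative only; this mirrors the way
        # BlockerDB.findInstalledBlockers() below uses the cache):
        #   blocker_cache = BlockerCache(myroot, vardb)
        #   blocker_cache[cpv] = blocker_cache.BlockerData(counter, blocker_atoms)
        #   atoms = blocker_cache[cpv].atoms
        #   del blocker_cache[stale_cpv]
        #   blocker_cache.flush()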
3920
3921         # Number of uncached packages to trigger cache update, since
3922         # it's wasteful to update it for every vdb change.
3923         _cache_threshold = 5
3924
3925         class BlockerData(object):
3926
3927                 __slots__ = ("__weakref__", "atoms", "counter")
3928
3929                 def __init__(self, counter, atoms):
3930                         self.counter = counter
3931                         self.atoms = atoms
3932
3933         def __init__(self, myroot, vardb):
3934                 self._vardb = vardb
3935                 self._virtuals = vardb.settings.getvirtuals()
3936                 self._cache_filename = os.path.join(myroot,
3937                         portage.CACHE_PATH.lstrip(os.path.sep), "vdb_blockers.pickle")
3938                 self._cache_version = "1"
3939                 self._cache_data = None
3940                 self._modified = set()
3941                 self._load()
3942
3943         def _load(self):
3944                 try:
3945                         f = open(self._cache_filename)
3946                         mypickle = pickle.Unpickler(f)
3947                         mypickle.find_global = None
3948                         self._cache_data = mypickle.load()
3949                         f.close()
3950                         del f
3951                 except (IOError, OSError, EOFError, pickle.UnpicklingError), e:
3952                         if isinstance(e, pickle.UnpicklingError):
3953                                 writemsg("!!! Error loading '%s': %s\n" % \
3954                                         (self._cache_filename, str(e)), noiselevel=-1)
3955                         del e
3956
3957                 cache_valid = self._cache_data and \
3958                         isinstance(self._cache_data, dict) and \
3959                         self._cache_data.get("version") == self._cache_version and \
3960                         isinstance(self._cache_data.get("blockers"), dict)
3961                 if cache_valid:
3962                         # Validate all the atoms and counters so that
3963                         # corruption is detected as soon as possible.
3964                         invalid_items = set()
3965                         for k, v in self._cache_data["blockers"].iteritems():
3966                                 if not isinstance(k, basestring):
3967                                         invalid_items.add(k)
3968                                         continue
3969                                 try:
3970                                         if portage.catpkgsplit(k) is None:
3971                                                 invalid_items.add(k)
3972                                                 continue
3973                                 except portage.exception.InvalidData:
3974                                         invalid_items.add(k)
3975                                         continue
3976                                 if not isinstance(v, tuple) or \
3977                                         len(v) != 2:
3978                                         invalid_items.add(k)
3979                                         continue
3980                                 counter, atoms = v
3981                                 if not isinstance(counter, (int, long)):
3982                                         invalid_items.add(k)
3983                                         continue
3984                                 if not isinstance(atoms, (list, tuple)):
3985                                         invalid_items.add(k)
3986                                         continue
3987                                 invalid_atom = False
3988                                 for atom in atoms:
3989                                         if not isinstance(atom, basestring):
3990                                                 invalid_atom = True
3991                                                 break
3992                                         if atom[:1] != "!" or \
3993                                                 not portage.isvalidatom(
3994                                                 atom, allow_blockers=True):
3995                                                 invalid_atom = True
3996                                                 break
3997                                 if invalid_atom:
3998                                         invalid_items.add(k)
3999                                         continue
4000
4001                         for k in invalid_items:
4002                                 del self._cache_data["blockers"][k]
4003                         if not self._cache_data["blockers"]:
4004                                 cache_valid = False
4005
4006                 if not cache_valid:
4007                         self._cache_data = {"version":self._cache_version}
4008                         self._cache_data["blockers"] = {}
4009                         self._cache_data["virtuals"] = self._virtuals
4010                 self._modified.clear()
4011
4012         def flush(self):
4013                 """If the current user has permission and the internal blocker cache has
4014                 been updated, save it to disk and mark it unmodified.  This is called
4015                 by emerge after it has processed blockers for all installed packages.
4016                 Currently, the cache is only written if the user has superuser
4017                 privileges (since that's required to obtain a lock), but all users
4018                 have read access and benefit from faster blocker lookups (as long as
4019                 the entire cache is still valid).  The cache is stored as a pickled
4020                 dict object with the following format:
4021
4022                 {
4023                         version : "1",
4024                         "blockers" : {cpv1:(counter,(atom1, atom2...)), cpv2...},
4025                         "virtuals" : vardb.settings.getvirtuals()
4026                 }
4027                 """
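                # A single "blockers" entry might look like (values are purely
                # hypothetical):
                #   "app-misc/foo-1.0" : (42L, ("!app-misc/bar", "!<app-misc/foo-1.0"))
                # where the long integer is the installed package's COUNTER and
                # the atoms are the blockers parsed from its *DEPEND.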
4028                 if len(self._modified) >= self._cache_threshold and \
4029                         secpass >= 2:
4030                         try:
4031                                 f = portage.util.atomic_ofstream(self._cache_filename)
4032                                 pickle.dump(self._cache_data, f, -1)
4033                                 f.close()
4034                                 portage.util.apply_secpass_permissions(
4035                                         self._cache_filename, gid=portage.portage_gid, mode=0644)
4036                         except (IOError, OSError), e:
4037                                 pass
4038                         self._modified.clear()
4039
4040         def __setitem__(self, cpv, blocker_data):
4041                 """
4042                 Update the cache and mark it as modified for a future call to
4043                 self.flush().
4044
4045                 @param cpv: Package for which to cache blockers.
4046                 @type cpv: String
4047                 @param blocker_data: An object with counter and atoms attributes.
4048                 @type blocker_data: BlockerData
4049                 """
4050                 self._cache_data["blockers"][cpv] = \
4051                         (blocker_data.counter, tuple(str(x) for x in blocker_data.atoms))
4052                 self._modified.add(cpv)
4053
4054         def __iter__(self):
4055                 if self._cache_data is None:
4056                         # triggered by python-trace
4057                         return iter([])
4058                 return iter(self._cache_data["blockers"])
4059
4060         def __delitem__(self, cpv):
4061                 del self._cache_data["blockers"][cpv]
4062
4063         def __getitem__(self, cpv):
4064                 """
4065                 @rtype: BlockerData
4066                 @returns: An object with counter and atoms attributes.
4067                 """
4068                 return self.BlockerData(*self._cache_data["blockers"][cpv])
4069
4070         def keys(self):
4071                 """This needs to be implemented so that self.__repr__() doesn't raise
4072                 an AttributeError."""
4073                 return list(self)
4074
4075 class BlockerDB(object):
4076
4077         def __init__(self, root_config):
4078                 self._root_config = root_config
4079                 self._vartree = root_config.trees["vartree"]
4080                 self._portdb = root_config.trees["porttree"].dbapi
4081
4082                 self._dep_check_trees = None
4083                 self._fake_vartree = None
4084
4085         def _get_fake_vartree(self, acquire_lock=0):
4086                 fake_vartree = self._fake_vartree
4087                 if fake_vartree is None:
4088                         fake_vartree = FakeVartree(self._root_config,
4089                                 acquire_lock=acquire_lock)
4090                         self._fake_vartree = fake_vartree
4091                         self._dep_check_trees = { self._vartree.root : {
4092                                 "porttree"    :  fake_vartree,
4093                                 "vartree"     :  fake_vartree,
4094                         }}
4095                 else:
4096                         fake_vartree.sync(acquire_lock=acquire_lock)
4097                 return fake_vartree
4098
4099         def findInstalledBlockers(self, new_pkg, acquire_lock=0):
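                # Return the set of installed packages that block new_pkg,
                # considering both directions: blocker atoms declared by
                # installed packages that match new_pkg, and blocker atoms
                # declared by new_pkg that match installed packages. Per-package
                # results are cached via BlockerCache to avoid repeated
                # dep_check() calls.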
4100                 blocker_cache = BlockerCache(self._vartree.root, self._vartree.dbapi)
4101                 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
4102                 settings = self._vartree.settings
4103                 stale_cache = set(blocker_cache)
4104                 fake_vartree = self._get_fake_vartree(acquire_lock=acquire_lock)
4105                 dep_check_trees = self._dep_check_trees
4106                 vardb = fake_vartree.dbapi
4107                 installed_pkgs = list(vardb)
4108
4109                 for inst_pkg in installed_pkgs:
4110                         stale_cache.discard(inst_pkg.cpv)
4111                         cached_blockers = blocker_cache.get(inst_pkg.cpv)
4112                         if cached_blockers is not None and \
4113                                 cached_blockers.counter != long(inst_pkg.metadata["COUNTER"]):
4114                                 cached_blockers = None
4115                         if cached_blockers is not None:
4116                                 blocker_atoms = cached_blockers.atoms
4117                         else:
4118                                 # Use aux_get() to trigger FakeVartree global
4119                                 # updates on *DEPEND when appropriate.
4120                                 depstr = " ".join(vardb.aux_get(inst_pkg.cpv, dep_keys))
4121                                 try:
4122                                         portage.dep._dep_check_strict = False
4123                                         success, atoms = portage.dep_check(depstr,
4124                                                 vardb, settings, myuse=inst_pkg.use.enabled,
4125                                                 trees=dep_check_trees, myroot=inst_pkg.root)
4126                                 finally:
4127                                         portage.dep._dep_check_strict = True
4128                                 if not success:
4129                                         pkg_location = os.path.join(inst_pkg.root,
4130                                                 portage.VDB_PATH, inst_pkg.category, inst_pkg.pf)
4131                                         portage.writemsg("!!! %s/*DEPEND: %s\n" % \
4132                                                 (pkg_location, atoms), noiselevel=-1)
4133                                         continue
4134
4135                                 blocker_atoms = [atom for atom in atoms \
4136                                         if atom.startswith("!")]
4137                                 blocker_atoms.sort()
4138                                 counter = long(inst_pkg.metadata["COUNTER"])
4139                                 blocker_cache[inst_pkg.cpv] = \
4140                                         blocker_cache.BlockerData(counter, blocker_atoms)
4141                 for cpv in stale_cache:
4142                         del blocker_cache[cpv]
4143                 blocker_cache.flush()
4144
4145                 blocker_parents = digraph()
4146                 blocker_atoms = []
4147                 for pkg in installed_pkgs:
4148                         for blocker_atom in blocker_cache[pkg.cpv].atoms:
4149                                 blocker_atom = blocker_atom.lstrip("!")
4150                                 blocker_atoms.append(blocker_atom)
4151                                 blocker_parents.add(blocker_atom, pkg)
4152
4153                 blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
4154                 blocking_pkgs = set()
4155                 for atom in blocker_atoms.iterAtomsForPackage(new_pkg):
4156                         blocking_pkgs.update(blocker_parents.parent_nodes(atom))
4157
4158                 # Check for blockers in the other direction.
4159                 depstr = " ".join(new_pkg.metadata[k] for k in dep_keys)
4160                 try:
4161                         portage.dep._dep_check_strict = False
4162                         success, atoms = portage.dep_check(depstr,
4163                                 vardb, settings, myuse=new_pkg.use.enabled,
4164                                 trees=dep_check_trees, myroot=new_pkg.root)
4165                 finally:
4166                         portage.dep._dep_check_strict = True
4167                 if not success:
4168                         # We should never get this far with invalid deps.
4169                         show_invalid_depstring_notice(new_pkg, depstr, atoms)
4170                         assert False
4171
4172                 blocker_atoms = [atom.lstrip("!") for atom in atoms \
4173                         if atom[:1] == "!"]
4174                 if blocker_atoms:
4175                         blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
4176                         for inst_pkg in installed_pkgs:
4177                                 try:
4178                                         blocker_atoms.iterAtomsForPackage(inst_pkg).next()
4179                                 except (portage.exception.InvalidDependString, StopIteration):
4180                                         continue
4181                                 blocking_pkgs.add(inst_pkg)
4182
4183                 return blocking_pkgs
4184
4185 def show_invalid_depstring_notice(parent_node, depstring, error_msg):
4186
4187         msg1 = "\n\n!!! Invalid or corrupt dependency specification: " + \
4188                 "\n\n%s\n\n%s\n\n%s\n\n" % (error_msg, parent_node, depstring)
4189         p_type, p_root, p_key, p_status = parent_node
4190         msg = []
4191         if p_status == "nomerge":
4192                 category, pf = portage.catsplit(p_key)
4193                 pkg_location = os.path.join(p_root, portage.VDB_PATH, category, pf)
4194                 msg.append("Portage is unable to process the dependencies of the ")
4195                 msg.append("'%s' package. " % p_key)
4196                 msg.append("In order to correct this problem, the package ")
4197                 msg.append("should be uninstalled, reinstalled, or upgraded. ")
4198                 msg.append("As a temporary workaround, the --nodeps option can ")
4199                 msg.append("be used to ignore all dependencies.  For reference, ")
4200                 msg.append("the problematic dependencies can be found in the ")
4201                 msg.append("*DEPEND files located in '%s/'." % pkg_location)
4202         else:
4203                 msg.append("This package can not be installed. ")
4204                 msg.append("Please notify the '%s' package maintainer " % p_key)
4205                 msg.append("about this problem.")
4206
4207         msg2 = "".join("%s\n" % line for line in textwrap.wrap("".join(msg), 72))
4208         writemsg_level(msg1 + msg2, level=logging.ERROR, noiselevel=-1)
4209
4210 class PackageVirtualDbapi(portage.dbapi):
4211         """
4212         A dbapi-like interface class that represents the state of the installed
4213         package database as new packages are installed, replacing any packages
4214         that previously existed in the same slot. The main difference between
4215         this class and fakedbapi is that this one uses Package instances
4216         internally (passed in via cpv_inject() and cpv_remove() calls).
4217         """
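        # Usage sketch (illustrative; depgraph.__init__() below populates an
        # instance in essentially this way):
        #   fakedb = PackageVirtualDbapi(vardb.settings)
        #   fakedb.cpv_inject(pkg)        # replaces any package in the same slot
        #   matches = fakedb.match_pkgs(atom)
        #   fakedb.cpv_remove(pkg)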
4218         def __init__(self, settings):
4219                 portage.dbapi.__init__(self)
4220                 self.settings = settings
4221                 self._match_cache = {}
4222                 self._cp_map = {}
4223                 self._cpv_map = {}
4224
4225         def clear(self):
4226                 """
4227                 Remove all packages.
4228                 """
4229                 if self._cpv_map:
4230                         self._clear_cache()
4231                         self._cp_map.clear()
4232                         self._cpv_map.clear()
4233
4234         def copy(self):
4235                 obj = PackageVirtualDbapi(self.settings)
4236                 obj._match_cache = self._match_cache.copy()
4237                 obj._cp_map = self._cp_map.copy()
4238                 for k, v in obj._cp_map.iteritems():
4239                         obj._cp_map[k] = v[:]
4240                 obj._cpv_map = self._cpv_map.copy()
4241                 return obj
4242
4243         def __iter__(self):
4244                 return self._cpv_map.itervalues()
4245
4246         def __contains__(self, item):
4247                 existing = self._cpv_map.get(item.cpv)
4248                 if existing is not None and \
4249                         existing == item:
4250                         return True
4251                 return False
4252
4253         def get(self, item, default=None):
4254                 cpv = getattr(item, "cpv", None)
4255                 if cpv is None:
4256                         if len(item) != 4:
4257                                 return default
4258                         type_name, root, cpv, operation = item
4259
4260                 existing = self._cpv_map.get(cpv)
4261                 if existing is not None and \
4262                         existing == item:
4263                         return existing
4264                 return default
4265
4266         def match_pkgs(self, atom):
4267                 return [self._cpv_map[cpv] for cpv in self.match(atom)]
4268
4269         def _clear_cache(self):
4270                 if self._categories is not None:
4271                         self._categories = None
4272                 if self._match_cache:
4273                         self._match_cache = {}
4274
4275         def match(self, origdep, use_cache=1):
4276                 result = self._match_cache.get(origdep)
4277                 if result is not None:
4278                         return result[:]
4279                 result = portage.dbapi.match(self, origdep, use_cache=use_cache)
4280                 self._match_cache[origdep] = result
4281                 return result[:]
4282
4283         def cpv_exists(self, cpv):
4284                 return cpv in self._cpv_map
4285
4286         def cp_list(self, mycp, use_cache=1):
4287                 cachelist = self._match_cache.get(mycp)
4288                 # cp_list() doesn't expand old-style virtuals
4289                 if cachelist and cachelist[0].startswith(mycp):
4290                         return cachelist[:]
4291                 cpv_list = self._cp_map.get(mycp)
4292                 if cpv_list is None:
4293                         cpv_list = []
4294                 else:
4295                         cpv_list = [pkg.cpv for pkg in cpv_list]
4296                 self._cpv_sort_ascending(cpv_list)
4297                 if not (not cpv_list and mycp.startswith("virtual/")):
4298                         self._match_cache[mycp] = cpv_list
4299                 return cpv_list[:]
4300
4301         def cp_all(self):
4302                 return list(self._cp_map)
4303
4304         def cpv_all(self):
4305                 return list(self._cpv_map)
4306
4307         def cpv_inject(self, pkg):
4308                 cp_list = self._cp_map.get(pkg.cp)
4309                 if cp_list is None:
4310                         cp_list = []
4311                         self._cp_map[pkg.cp] = cp_list
4312                 e_pkg = self._cpv_map.get(pkg.cpv)
4313                 if e_pkg is not None:
4314                         if e_pkg == pkg:
4315                                 return
4316                         self.cpv_remove(e_pkg)
4317                 for e_pkg in cp_list:
4318                         if e_pkg.slot_atom == pkg.slot_atom:
4319                                 if e_pkg == pkg:
4320                                         return
4321                                 self.cpv_remove(e_pkg)
4322                                 break
4323                 cp_list.append(pkg)
4324                 self._cpv_map[pkg.cpv] = pkg
4325                 self._clear_cache()
4326
4327         def cpv_remove(self, pkg):
4328                 old_pkg = self._cpv_map.get(pkg.cpv)
4329                 if old_pkg != pkg:
4330                         raise KeyError(pkg)
4331                 self._cp_map[pkg.cp].remove(pkg)
4332                 del self._cpv_map[pkg.cpv]
4333                 self._clear_cache()
4334
4335         def aux_get(self, cpv, wants):
4336                 metadata = self._cpv_map[cpv].metadata
4337                 return [metadata.get(x, "") for x in wants]
4338
4339         def aux_update(self, cpv, values):
4340                 self._cpv_map[cpv].metadata.update(values)
4341                 self._clear_cache()
4342
4343 class depgraph(object):
4344
4345         pkg_tree_map = RootConfig.pkg_tree_map
4346
4347         _dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
4348
4349         def __init__(self, settings, trees, myopts, myparams, spinner):
4350                 self.settings = settings
4351                 self.target_root = settings["ROOT"]
4352                 self.myopts = myopts
4353                 self.myparams = myparams
4354                 self.edebug = 0
4355                 if settings.get("PORTAGE_DEBUG", "") == "1":
4356                         self.edebug = 1
4357                 self.spinner = spinner
4358                 self._running_root = trees["/"]["root_config"]
4359                 self._opts_no_restart = Scheduler._opts_no_restart
4360                 self.pkgsettings = {}
4361                 # Maps slot atom to package for each Package added to the graph.
4362                 self._slot_pkg_map = {}
4363                 # Maps nodes to the reasons they were selected for reinstallation.
4364                 self._reinstall_nodes = {}
4365                 self.mydbapi = {}
4366                 self.trees = {}
4367                 self._trees_orig = trees
4368                 self.roots = {}
4369                 # Contains a filtered view of preferred packages that are selected
4370                 # from available repositories.
4371                 self._filtered_trees = {}
4372                 # Contains installed packages and new packages that have been added
4373                 # to the graph.
4374                 self._graph_trees = {}
4375                 # All Package instances
4376                 self._pkg_cache = {}
4377                 for myroot in trees:
4378                         self.trees[myroot] = {}
4379                         # Create a RootConfig instance that references
4380                         # the FakeVartree instead of the real one.
4381                         self.roots[myroot] = RootConfig(
4382                                 trees[myroot]["vartree"].settings,
4383                                 self.trees[myroot],
4384                                 trees[myroot]["root_config"].setconfig)
4385                         for tree in ("porttree", "bintree"):
4386                                 self.trees[myroot][tree] = trees[myroot][tree]
4387                         self.trees[myroot]["vartree"] = \
4388                                 FakeVartree(trees[myroot]["root_config"],
4389                                         pkg_cache=self._pkg_cache)
4390                         self.pkgsettings[myroot] = portage.config(
4391                                 clone=self.trees[myroot]["vartree"].settings)
4392                         self._slot_pkg_map[myroot] = {}
4393                         vardb = self.trees[myroot]["vartree"].dbapi
4394                         preload_installed_pkgs = "--nodeps" not in self.myopts and \
4395                                 "--buildpkgonly" not in self.myopts
4396                         # This fakedbapi instance will model the state that the vdb will
4397                         # have after new packages have been installed.
4398                         fakedb = PackageVirtualDbapi(vardb.settings)
4399                         if preload_installed_pkgs:
4400                                 for pkg in vardb:
4401                                         self.spinner.update()
4402                                         # This triggers metadata updates via FakeVartree.
4403                                         vardb.aux_get(pkg.cpv, [])
4404                                         fakedb.cpv_inject(pkg)
4405
4406                         # Now that the vardb state is cached in our FakeVartree,
4407                         # we won't be needing the real vartree cache for a while.
4408                         # To make some room on the heap, clear the vardbapi
4409                         # caches.
4410                         trees[myroot]["vartree"].dbapi._clear_cache()
4411                         gc.collect()
4412
4413                         self.mydbapi[myroot] = fakedb
4414                         def graph_tree():
4415                                 pass
4416                         graph_tree.dbapi = fakedb
4417                         self._graph_trees[myroot] = {}
4418                         self._filtered_trees[myroot] = {}
4419                         # Substitute the graph tree for the vartree in dep_check() since we
4420                         # want atom selections to be consistent with package selections that
4421                         # have already been made.
4422                         self._graph_trees[myroot]["porttree"]   = graph_tree
4423                         self._graph_trees[myroot]["vartree"]    = graph_tree
4424                         def filtered_tree():
4425                                 pass
4426                         filtered_tree.dbapi = self._dep_check_composite_db(self, myroot)
4427                         self._filtered_trees[myroot]["porttree"] = filtered_tree
4428
4429                         # Passing in graph_tree as the vartree here could lead to better
4430                         # atom selections in some cases by causing atoms for packages that
4431                         # have been added to the graph to be preferred over other choices.
4432                         # However, it can trigger atom selections that result in
4433                         # unresolvable direct circular dependencies. For example, this
4434                         # happens with gwydion-dylan which depends on either itself or
4435                         # gwydion-dylan-bin. In case gwydion-dylan is not yet installed,
4436                         # gwydion-dylan-bin needs to be selected in order to avoid an
4437                         # unresolvable direct circular dependency.
4438                         #
4439                         # To solve the problem described above, pass in "graph_db" so that
4440                         # packages that have been added to the graph are distinguishable
4441                         # from other available packages and installed packages. Also, pass
4442                         # the parent package into self._select_atoms() calls so that
4443                         # unresolvable direct circular dependencies can be detected and
4444                         # avoided when possible.
4445                         self._filtered_trees[myroot]["graph_db"] = graph_tree.dbapi
4446                         self._filtered_trees[myroot]["vartree"] = self.trees[myroot]["vartree"]
4447
4448                         dbs = []
4449                         portdb = self.trees[myroot]["porttree"].dbapi
4450                         bindb  = self.trees[myroot]["bintree"].dbapi
4451                         vardb  = self.trees[myroot]["vartree"].dbapi
4452                         #               (db, pkg_type, built, installed, db_keys)
4453                         if "--usepkgonly" not in self.myopts:
4454                                 db_keys = list(portdb._aux_cache_keys)
4455                                 dbs.append((portdb, "ebuild", False, False, db_keys))
4456                         if "--usepkg" in self.myopts:
4457                                 db_keys = list(bindb._aux_cache_keys)
4458                                 dbs.append((bindb,  "binary", True, False, db_keys))
4459                         db_keys = list(trees[myroot]["vartree"].dbapi._aux_cache_keys)
4460                         dbs.append((vardb, "installed", True, True, db_keys))
4461                         self._filtered_trees[myroot]["dbs"] = dbs
4462                         if "--usepkg" in self.myopts:
4463                                 self.trees[myroot]["bintree"].populate(
4464                                         "--getbinpkg" in self.myopts,
4465                                         "--getbinpkgonly" in self.myopts)
4466                 del trees
4467
4468                 self.digraph=portage.digraph()
4469                 # contains all sets added to the graph
4470                 self._sets = {}
4471                 # contains atoms given as arguments
4472                 self._sets["args"] = InternalPackageSet()
4473                 # contains all atoms from all sets added to the graph, including
4474                 # atoms given as arguments
4475                 self._set_atoms = InternalPackageSet()
4476                 self._atom_arg_map = {}
4477                 # contains all nodes pulled in by self._set_atoms
4478                 self._set_nodes = set()
4479                 # Contains only Blocker -> Uninstall edges
4480                 self._blocker_uninstalls = digraph()
4481                 # Contains only Package -> Blocker edges
4482                 self._blocker_parents = digraph()
4483                 # Contains only irrelevant Package -> Blocker edges
4484                 self._irrelevant_blockers = digraph()
4485                 # Contains only unsolvable Package -> Blocker edges
4486                 self._unsolvable_blockers = digraph()
4487                 # Contains all Blocker -> Blocked Package edges
4488                 self._blocked_pkgs = digraph()
4489                 # Contains world packages that have been protected from
4490                 # uninstallation but may not have been added to the graph
4491                 # if the graph is not complete yet.
4492                 self._blocked_world_pkgs = {}
4493                 self._slot_collision_info = {}
4494                 # Slot collision nodes are not allowed to block other packages since
4495                 # blocker validation is only able to account for one package per slot.
4496                 self._slot_collision_nodes = set()
4497                 self._parent_atoms = {}
4498                 self._slot_conflict_parent_atoms = set()
4499                 self._serialized_tasks_cache = None
4500                 self._scheduler_graph = None
4501                 self._displayed_list = None
4502                 self._pprovided_args = []
4503                 self._missing_args = []
4504                 self._masked_installed = set()
4505                 self._unsatisfied_deps_for_display = []
4506                 self._unsatisfied_blockers_for_display = None
4507                 self._circular_deps_for_display = None
4508                 self._dep_stack = []
4509                 self._unsatisfied_deps = []
4510                 self._initially_unsatisfied_deps = []
4511                 self._ignored_deps = []
4512                 self._required_set_names = set(["system", "world"])
4513                 self._select_atoms = self._select_atoms_highest_available
4514                 self._select_package = self._select_pkg_highest_available
4515                 self._highest_pkg_cache = {}
4516
4517         def _show_slot_collision_notice(self):
4518                 """Show an informational message advising the user to mask one of
4519                 the packages. In some cases it may be possible to resolve this
4520                 automatically, but support for backtracking (removal of nodes that have
4521                 already been selected) will be required in order to handle all possible
4522                 cases.
4523                 """
4524
4525                 if not self._slot_collision_info:
4526                         return
4527
4528                 self._show_merge_list()
4529
4530                 msg = []
4531                 msg.append("\n!!! Multiple package instances within a single " + \
4532                         "package slot have been pulled\n")
4533                 msg.append("!!! into the dependency graph, resulting" + \
4534                         " in a slot conflict:\n\n")
4535                 indent = "  "
4536                 # Max number of parents shown, to avoid flooding the display.
4537                 max_parents = 3
4538                 explanation_columns = 70
4539                 explanations = 0
4540                 for (slot_atom, root), slot_nodes \
4541                         in self._slot_collision_info.iteritems():
4542                         msg.append(str(slot_atom))
4543                         msg.append("\n\n")
4544
4545                         for node in slot_nodes:
4546                                 msg.append(indent)
4547                                 msg.append(str(node))
4548                                 parent_atoms = self._parent_atoms.get(node)
4549                                 if parent_atoms:
4550                                         pruned_list = set()
4551                                         # Prefer conflict atoms over others.
4552                                         for parent_atom in parent_atoms:
4553                                                 if len(pruned_list) >= max_parents:
4554                                                         break
4555                                                 if parent_atom in self._slot_conflict_parent_atoms:
4556                                                         pruned_list.add(parent_atom)
4557
4558                                         # If this package was pulled in by conflict atoms then
4559                                         # show those alone since those are the most interesting.
4560                                         if not pruned_list:
4561                                                 # When generating the pruned list, prefer instances
4562                                                 # of DependencyArg over instances of Package.
4563                                                 for parent_atom in parent_atoms:
4564                                                         if len(pruned_list) >= max_parents:
4565                                                                 break
4566                                                         parent, atom = parent_atom
4567                                                         if isinstance(parent, DependencyArg):
4568                                                                 pruned_list.add(parent_atom)
4569                                                 # Prefer Package instances that themselves have been
4570                                                 # pulled into collision slots.
4571                                                 for parent_atom in parent_atoms:
4572                                                         if len(pruned_list) >= max_parents:
4573                                                                 break
4574                                                         parent, atom = parent_atom
4575                                                         if isinstance(parent, Package) and \
4576                                                                 (parent.slot_atom, parent.root) \
4577                                                                 in self._slot_collision_info:
4578                                                                 pruned_list.add(parent_atom)
4579                                                 for parent_atom in parent_atoms:
4580                                                         if len(pruned_list) >= max_parents:
4581                                                                 break
4582                                                         pruned_list.add(parent_atom)
4583                                         omitted_parents = len(parent_atoms) - len(pruned_list)
4584                                         parent_atoms = pruned_list
4585                                         msg.append(" pulled in by\n")
4586                                         for parent_atom in parent_atoms:
4587                                                 parent, atom = parent_atom
4588                                                 msg.append(2*indent)
4589                                                 if isinstance(parent,
4590                                                         (PackageArg, AtomArg)):
4591                                                         # For PackageArg and AtomArg types, it's
4592                                                         # redundant to display the atom attribute.
4593                                                         msg.append(str(parent))
4594                                                 else:
4595                                                         # Display the specific atom from SetArg or
4596                                                         # Package types.
4597                                                         msg.append("%s required by %s" % (atom, parent))
4598                                                 msg.append("\n")
4599                                         if omitted_parents:
4600                                                 msg.append(2*indent)
4601                                                 msg.append("(and %d more)\n" % omitted_parents)
4602                                 else:
4603                                         msg.append(" (no parents)\n")
4604                                 msg.append("\n")
4605                         explanation = self._slot_conflict_explanation(slot_nodes)
4606                         if explanation:
4607                                 explanations += 1
4608                                 msg.append(indent + "Explanation:\n\n")
4609                                 for line in textwrap.wrap(explanation, explanation_columns):
4610                                         msg.append(2*indent + line + "\n")
4611                                 msg.append("\n")
4612                 msg.append("\n")
4613                 sys.stderr.write("".join(msg))
4614                 sys.stderr.flush()
4615
4616                 explanations_for_all = explanations == len(self._slot_collision_info)
4617
4618                 if explanations_for_all or "--quiet" in self.myopts:
4619                         return
4620
4621                 msg = []
4622                 msg.append("It may be possible to solve this problem ")
4623                 msg.append("by using package.mask to prevent one of ")
4624                 msg.append("those packages from being selected. ")
4625                 msg.append("However, it is also possible that conflicting ")
4626                 msg.append("dependencies exist such that they are impossible to ")
4627                 msg.append("satisfy simultaneously.  If such a conflict exists in ")
4628                 msg.append("the dependencies of two different packages, then those ")
4629                 msg.append("packages can not be installed simultaneously.")
4630
4631                 from formatter import AbstractFormatter, DumbWriter
4632                 f = AbstractFormatter(DumbWriter(sys.stderr, maxcol=72))
4633                 for x in msg:
4634                         f.add_flowing_data(x)
4635                 f.end_paragraph(1)
4636
4637                 msg = []
4638                 msg.append("For more information, see MASKED PACKAGES ")
4639                 msg.append("section in the emerge man page or refer ")
4640                 msg.append("to the Gentoo Handbook.")
4641                 for x in msg:
4642                         f.add_flowing_data(x)
4643                 f.end_paragraph(1)
4644                 f.writer.flush()
4645
4646         def _slot_conflict_explanation(self, slot_nodes):
4647                 """
4648                 When a slot conflict occurs due to USE deps, there are a few
4649                 different cases to consider:
4650
4651                 1) New USE are correctly set but --newuse wasn't requested so an
4652                    installed package with incorrect USE happened to get pulled
4653                    into the graph before the new one.
4654
4655                 2) New USE are incorrectly set but an installed package has correct
4656                    USE so it got pulled into the graph, and a new instance also got
4657                    pulled in due to --newuse or an upgrade.
4658
4659                 3) Multiple USE deps exist that can't be satisfied simultaneously,
4660                    and multiple package instances got pulled into the same slot to
4661                    satisfy the conflicting deps.
4662
4663                 Currently, explanations and suggested courses of action are generated
4664                 for cases 1 and 2. Case 3 is too complex to give a useful suggestion.
4665                 """
4666
4667                 if len(slot_nodes) != 2:
4668                         # Suggestions are only implemented for
4669                         # conflicts between two packages.
4670                         return None
4671
4672                 all_conflict_atoms = self._slot_conflict_parent_atoms
4673                 matched_node = None
4674                 matched_atoms = None
4675                 unmatched_node = None
4676                 for node in slot_nodes:
4677                         parent_atoms = self._parent_atoms.get(node)
4678                         if not parent_atoms:
4679                                 # Normally, there are always parent atoms. If there are
4680                                 # none then something unexpected is happening and there's
4681                                 # currently no suggestion for this case.
4682                                 return None
4683                         conflict_atoms = all_conflict_atoms.intersection(parent_atoms)
4684                         for parent_atom in conflict_atoms:
4685                                 parent, atom = parent_atom
4686                                 if not atom.use:
4687                                         # Suggestions are currently only implemented for cases
4688                                         # in which all conflict atoms have USE deps.
4689                                         return None
4690                         if conflict_atoms:
4691                                 if matched_node is not None:
4692                                         # If conflict atoms match multiple nodes
4693                                         # then there's no suggestion.
4694                                         return None
4695                                 matched_node = node
4696                                 matched_atoms = conflict_atoms
4697                         else:
4698                                 if unmatched_node is not None:
4699                                         # Neither node is matched by conflict atoms, and
4700                                         # there is no suggestion for this case.
4701                                         return None
4702                                 unmatched_node = node
4703
4704                 if matched_node is None or unmatched_node is None:
4705                         # This shouldn't happen.
4706                         return None
4707
4708                 if unmatched_node.installed and not matched_node.installed:
4709                         return "New USE are correctly set, but --newuse wasn't" + \
4710                                 " requested, so an installed package with incorrect USE " + \
4711                                 "happened to get pulled into the dependency graph. " + \
4712                                 "In order to solve " + \
4713                                 "this, either specify the --newuse option or explicitly " + \
4714                                 "reinstall '%s'." % matched_node.slot_atom
4715
4716                 if matched_node.installed and not unmatched_node.installed:
4717                         atoms = sorted(set(atom for parent, atom in matched_atoms))
4718                         explanation = ("New USE for '%s' are incorrectly set. " + \
4719                                 "In order to solve this, adjust USE to satisfy '%s'") % \
4720                                 (matched_node.slot_atom, atoms[0])
4721                         if len(atoms) > 1:
4722                                 for atom in atoms[1:-1]:
4723                                         explanation += ", '%s'" % (atom,)
4724                                 if len(atoms) > 2:
4725                                         explanation += ","
4726                                 explanation += " and '%s'" % (atoms[-1],)
4727                         explanation += "."
4728                         return explanation
4729
4730                 return None
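                # Illustrative sketch (hypothetical packages, not derived from the
                # matching above): suppose dev-libs/foo:0 was pulled into the graph
                # twice because one parent requires foo[ssl] while the installed
                # copy was built with USE="-ssl".
                #
                #   Case 1: the new foo already has USE="ssl" but --newuse was not
                #           given, so the stale installed instance was also pulled
                #           in; the suggestion is to pass --newuse or reinstall foo.
                #   Case 2: the installed foo satisfies foo[ssl] but the newly
                #           configured USE does not; the suggestion is to adjust
                #           USE so that 'dev-libs/foo[ssl]' can be satisfied.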
4731
4732         def _process_slot_conflicts(self):
4733                 """
4734                 Process slot conflict data to identify specific atoms which
4735                 lead to conflict. These atoms only match a subset of the
4736                 packages that have been pulled into a given slot.
4737                 """
4738                 for (slot_atom, root), slot_nodes \
4739                         in self._slot_collision_info.iteritems():
4740
4741                         all_parent_atoms = set()
4742                         for pkg in slot_nodes:
4743                                 parent_atoms = self._parent_atoms.get(pkg)
4744                                 if not parent_atoms:
4745                                         continue
4746                                 all_parent_atoms.update(parent_atoms)
4747
4748                         for pkg in slot_nodes:
4749                                 parent_atoms = self._parent_atoms.get(pkg)
4750                                 if parent_atoms is None:
4751                                         parent_atoms = set()
4752                                         self._parent_atoms[pkg] = parent_atoms
4753                                 for parent_atom in all_parent_atoms:
4754                                         if parent_atom in parent_atoms:
4755                                                 continue
4756                                         # Use package set for matching since it will match via
4757                                         # PROVIDE when necessary, while match_from_list does not.
4758                                         parent, atom = parent_atom
4759                                         atom_set = InternalPackageSet(
4760                                                 initial_atoms=(atom,))
4761                                         if atom_set.findAtomForPackage(pkg):
4762                                                 parent_atoms.add(parent_atom)
4763                                         else:
4764                                                 self._slot_conflict_parent_atoms.add(parent_atom)
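                # A minimal sketch of the bookkeeping above (hypothetical packages):
                # if dev-libs/foo-1 and dev-libs/foo-2 both occupy slot "0" and a
                # parent atom ">=dev-libs/foo-2[ssl]" matches only foo-2, then
                # checking it against foo-2 records it in that package's
                # _parent_atoms set, while checking it against foo-1 fails and
                # records the (parent, atom) pair in self._slot_conflict_parent_atoms.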
4765
4766         def _reinstall_for_flags(self, forced_flags,
4767                 orig_use, orig_iuse, cur_use, cur_iuse):
4768                 """Return a set of flags that trigger reinstallation, or None if there
4769                 are no such flags."""
4770                 if "--newuse" in self.myopts:
4771                         flags = set(orig_iuse.symmetric_difference(
4772                                 cur_iuse).difference(forced_flags))
4773                         flags.update(orig_iuse.intersection(orig_use).symmetric_difference(
4774                                 cur_iuse.intersection(cur_use)))
4775                         if flags:
4776                                 return flags
4777                 elif "changed-use" == self.myopts.get("--reinstall"):
4778                         flags = orig_iuse.intersection(orig_use).symmetric_difference(
4779                                 cur_iuse.intersection(cur_use))
4780                         if flags:
4781                                 return flags
4782                 return None
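                # A minimal worked example of the set arithmetic above, using
                # hypothetical flag names purely as an illustration:
                #
                #   orig_iuse = set(["ssl", "gtk"]);  orig_use = set(["ssl"])
                #   cur_iuse  = set(["ssl", "qt4"]);  cur_use  = set(["ssl", "qt4"])
                #   forced_flags = set()
                #
                #   iuse_diff = orig_iuse ^ cur_iuse              # set(["gtk", "qt4"])
                #   use_diff  = (orig_iuse & orig_use) ^ \
                #               (cur_iuse & cur_use)              # set(["qt4"])
                #   flags = (iuse_diff - forced_flags) | use_diff # set(["gtk", "qt4"])
                #
                # A non-empty result triggers reinstallation under --newuse; with
                # --reinstall=changed-use only use_diff is considered.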
4783
4784         def _create_graph(self, allow_unsatisfied=False):
4785                 dep_stack = self._dep_stack
4786                 while dep_stack:
4787                         self.spinner.update()
4788                         dep = dep_stack.pop()
4789                         if isinstance(dep, Package):
4790                                 if not self._add_pkg_deps(dep,
4791                                         allow_unsatisfied=allow_unsatisfied):
4792                                         return 0
4793                                 continue
4794                         if not self._add_dep(dep, allow_unsatisfied=allow_unsatisfied):
4795                                 return 0
4796                 return 1
4797
4798         def _add_dep(self, dep, allow_unsatisfied=False):
4799                 debug = "--debug" in self.myopts
4800                 buildpkgonly = "--buildpkgonly" in self.myopts
4801                 nodeps = "--nodeps" in self.myopts
4802                 empty = "empty" in self.myparams
4803                 deep = "deep" in self.myparams
4804                 update = "--update" in self.myopts and dep.depth <= 1
4805                 if dep.blocker:
4806                         if not buildpkgonly and \
4807                                 not nodeps and \
4808                                 dep.parent not in self._slot_collision_nodes:
4809                                 if dep.parent.onlydeps:
4810                                         # It's safe to ignore blockers if the
4811                                         # parent is an --onlydeps node.
4812                                         return 1
4813                                 # The blocker applies to the root where
4814                                 # the parent is or will be installed.
4815                                 blocker = Blocker(atom=dep.atom,
4816                                         eapi=dep.parent.metadata["EAPI"],
4817                                         root=dep.parent.root)
4818                                 self._blocker_parents.add(blocker, dep.parent)
4819                         return 1
4820                 dep_pkg, existing_node = self._select_package(dep.root, dep.atom,
4821                         onlydeps=dep.onlydeps)
4822                 if not dep_pkg:
4823                         if allow_unsatisfied:
4824                                 self._unsatisfied_deps.append(dep)
4825                                 return 1
4826                         self._unsatisfied_deps_for_display.append(
4827                                 ((dep.root, dep.atom), {"myparent":dep.parent}))
4828                         return 0
4829                 # In some cases, dep_check will return deps that shouldn't
4830                 # be processed any further, so they are identified and
4831                 # discarded here. Try to discard as few as possible since
4832                 # discarded dependencies reduce the amount of information
4833                 # available for optimization of merge order.
4834                 if dep.priority.satisfied and \
4835                         not (existing_node or empty or deep or update):
4836                         myarg = None
4837                         if dep.root == self.target_root:
4838                                 try:
4839                                         myarg = self._iter_atoms_for_pkg(dep_pkg).next()
4840                                 except StopIteration:
4841                                         pass
4842                                 except portage.exception.InvalidDependString:
4843                                         if not dep_pkg.installed:
4844                                                 # This shouldn't happen since the package
4845                                                 # should have been masked.
4846                                                 raise
4847                         if not myarg:
4848                                 self._ignored_deps.append(dep)
4849                                 return 1
4850
4851                 if not self._add_pkg(dep_pkg, dep):
4852                         return 0
4853                 return 1
4854
4855         def _add_pkg(self, pkg, dep):
4856                 myparent = None
4857                 priority = None
4858                 depth = 0
4859                 if dep is None:
4860                         dep = Dependency()
4861                 else:
4862                         myparent = dep.parent
4863                         priority = dep.priority
4864                         depth = dep.depth
4865                 if priority is None:
4866                         priority = DepPriority()
4867                 """
4868                 Fills the digraph with nodes comprised of packages to merge.
4869                 pkg is the package to merge.
4870                 myparent is the package depending on pkg (or None).
4871                 pkg.onlydeps = Are we just looking at its deps rather than adding it to the digraph?
4872                         Think --onlydeps; we need to ignore such packages when merging.
4873                 #stuff to add:
4874                 #SLOT-aware emerge
4875                 #IUSE-aware emerge -> USE DEP aware depgraph
4876                 #"no downgrade" emerge
4877                 """
4878                 # Ensure that the dependencies of the same package
4879                 # are never processed more than once.
4880                 previously_added = pkg in self.digraph
4881
4882                 # select the correct /var database that we'll be checking against
4883                 vardbapi = self.trees[pkg.root]["vartree"].dbapi
4884                 pkgsettings = self.pkgsettings[pkg.root]
4885
4886                 arg_atoms = None
4887                 if True:
4888                         try:
4889                                 arg_atoms = list(self._iter_atoms_for_pkg(pkg))
4890                         except portage.exception.InvalidDependString, e:
4891                                 if not pkg.installed:
4892                                         show_invalid_depstring_notice(
4893                                                 pkg, pkg.metadata["PROVIDE"], str(e))
4894                                         return 0
4895                                 del e
4896
4897                 if not pkg.onlydeps:
4898                         if not pkg.installed and \
4899                                 "empty" not in self.myparams and \
4900                                 vardbapi.match(pkg.slot_atom):
4901                                 # Increase the priority of dependencies on packages that
4902                                 # are being rebuilt. This optimizes merge order so that
4903                                 # dependencies are rebuilt/updated as soon as possible,
4904                                 # which is needed especially when emerge is called by
4905                                 # revdep-rebuild since dependencies may be affected by ABI
4906                                 # breakage that has rendered them useless. Don't adjust
4907                                 # priority here when in "empty" mode since all packages
4908                                 # are being merged in that case.
4909                                 priority.rebuild = True
4910
4911                         existing_node = self._slot_pkg_map[pkg.root].get(pkg.slot_atom)
4912                         slot_collision = False
4913                         if existing_node:
4914                                 existing_node_matches = pkg.cpv == existing_node.cpv
4915                                 if existing_node_matches and \
4916                                         pkg != existing_node and \
4917                                         dep.atom is not None:
4918                                         # Use package set for matching since it will match via
4919                                         # PROVIDE when necessary, while match_from_list does not.
4920                                         atom_set = InternalPackageSet(initial_atoms=[dep.atom])
4921                                         if not atom_set.findAtomForPackage(existing_node):
4922                                                 existing_node_matches = False
4923                                 if existing_node_matches:
4924                                         # The existing node can be reused.
4925                                         if arg_atoms:
4926                                                 for parent_atom in arg_atoms:
4927                                                         parent, atom = parent_atom
4928                                                         self.digraph.add(existing_node, parent,
4929                                                                 priority=priority)
4930                                                         self._add_parent_atom(existing_node, parent_atom)
4931                                         # If a direct circular dependency is not an unsatisfied
4932                                         # buildtime dependency then drop it here since otherwise
4933                                         # it can skew the merge order calculation in an unwanted
4934                                         # way.
4935                                         if existing_node != myparent or \
4936                                                 (priority.buildtime and not priority.satisfied):
4937                                                 self.digraph.addnode(existing_node, myparent,
4938                                                         priority=priority)
4939                                                 if dep.atom is not None and dep.parent is not None:
4940                                                         self._add_parent_atom(existing_node,
4941                                                                 (dep.parent, dep.atom))
4942                                         return 1
4943                                 else:
4944
4945                                         # A slot collision has occurred.  Sometimes this coincides
4946                                         # with unresolvable blockers, so the slot collision will be
4947                                         # shown later if there are no unresolvable blockers.
4948                                         self._add_slot_conflict(pkg)
4949                                         slot_collision = True
4950
4951                         if slot_collision:
4952                                 # Now add this node to the graph so that self.display()
4953                                 # can show use flags and --tree output.  This node is
4954                                 # only being partially added to the graph.  It must not be
4955                                 # allowed to interfere with the other nodes that have been
4956                                 # added.  Do not overwrite data for existing nodes in
4957                                 # self.mydbapi since that data will be used for blocker
4958                                 # validation.
4959                                 # Even though the graph is now invalid, continue to process
4960                                 # dependencies so that things like --fetchonly can still
4961                                 # function despite collisions.
4962                                 pass
4963                         else:
4964                                 self._slot_pkg_map[pkg.root][pkg.slot_atom] = pkg
4965                                 self.mydbapi[pkg.root].cpv_inject(pkg)
4966
4967                         if not pkg.installed:
4968                                 # Allow this package to satisfy old-style virtuals in case it
4969                                 # doesn't already. Any pre-existing providers will be preferred
4970                                 # over this one.
4971                                 try:
4972                                         pkgsettings.setinst(pkg.cpv, pkg.metadata)
4973                                         # For consistency, also update the global virtuals.
4974                                         settings = self.roots[pkg.root].settings
4975                                         settings.unlock()
4976                                         settings.setinst(pkg.cpv, pkg.metadata)
4977                                         settings.lock()
4978                                 except portage.exception.InvalidDependString, e:
4979                                         show_invalid_depstring_notice(
4980                                                 pkg, pkg.metadata["PROVIDE"], str(e))
4981                                         del e
4982                                         return 0
4983
4984                 if arg_atoms:
4985                         self._set_nodes.add(pkg)
4986
4987                 # Do this even when addme is False (--onlydeps) so that the
4988                 # parent/child relationship is always known in case
4989                 # self._show_slot_collision_notice() needs to be called later.
4990                 self.digraph.add(pkg, myparent, priority=priority)
4991                 if dep.atom is not None and dep.parent is not None:
4992                         self._add_parent_atom(pkg, (dep.parent, dep.atom))
4993
4994                 if arg_atoms:
4995                         for parent_atom in arg_atoms:
4996                                 parent, atom = parent_atom
4997                                 self.digraph.add(pkg, parent, priority=priority)
4998                                 self._add_parent_atom(pkg, parent_atom)
4999
5000                 """ This section determines whether we go deeper into dependencies or not.
5001                     We want to go deeper on a few occasions:
5002                     Installing package A, we need to make sure package A's deps are met.
5003                     emerge --deep <pkgspec>; we need to recursively check dependencies of pkgspec.
5004                     If we are in --nodeps (no recursion) mode, we obviously only check 1 level of dependencies.
5005                 """
5006                 dep_stack = self._dep_stack
5007                 if "recurse" not in self.myparams:
5008                         return 1
5009                 elif pkg.installed and \
5010                         "deep" not in self.myparams:
5011                         dep_stack = self._ignored_deps
5012
5013                 self.spinner.update()
5014
5015                 if arg_atoms:
5016                         depth = 0
5017                 pkg.depth = depth
5018                 if not previously_added:
5019                         dep_stack.append(pkg)
5020                 return 1
5021
5022         def _add_parent_atom(self, pkg, parent_atom):
5023                 parent_atoms = self._parent_atoms.get(pkg)
5024                 if parent_atoms is None:
5025                         parent_atoms = set()
5026                         self._parent_atoms[pkg] = parent_atoms
5027                 parent_atoms.add(parent_atom)
5028
5029         def _add_slot_conflict(self, pkg):
5030                 self._slot_collision_nodes.add(pkg)
5031                 slot_key = (pkg.slot_atom, pkg.root)
5032                 slot_nodes = self._slot_collision_info.get(slot_key)
5033                 if slot_nodes is None:
5034                         slot_nodes = set()
5035                         slot_nodes.add(self._slot_pkg_map[pkg.root][pkg.slot_atom])
5036                         self._slot_collision_info[slot_key] = slot_nodes
5037                 slot_nodes.add(pkg)
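                # Resulting structure, sketched with hypothetical entries:
                #
                #   self._slot_collision_info = {
                #       ("dev-libs/foo:0", "/"):
                #           set([<dev-libs/foo-1>, <dev-libs/foo-2>]),
                #   }
                #
                # The set for a new slot key is seeded from _slot_pkg_map so that
                # the package already occupying the slot is always included.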
5038
5039         def _add_pkg_deps(self, pkg, allow_unsatisfied=False):
5040
5041                 mytype = pkg.type_name
5042                 myroot = pkg.root
5043                 mykey = pkg.cpv
5044                 metadata = pkg.metadata
5045                 myuse = pkg.use.enabled
5046                 jbigkey = pkg
5047                 depth = pkg.depth + 1
5048                 removal_action = "remove" in self.myparams
5049
5050                 edepend={}
5051                 depkeys = ["DEPEND","RDEPEND","PDEPEND"]
5052                 for k in depkeys:
5053                         edepend[k] = metadata[k]
5054
5055                 if not pkg.built and \
5056                         "--buildpkgonly" in self.myopts and \
5057                         "deep" not in self.myparams and \
5058                         "empty" not in self.myparams:
5059                         edepend["RDEPEND"] = ""
5060                         edepend["PDEPEND"] = ""
5061                 bdeps_satisfied = False
5062                 
5063                 if pkg.built and not removal_action:
5064                         if self.myopts.get("--with-bdeps", "n") == "y":
5065                                 # Pull in build time deps as requested, but mark them as
5066                                 # "satisfied" since they are not strictly required. This allows
5067                                 # more freedom in the merge order calculation for solving
5068                                 # circular dependencies. Don't convert to PDEPEND since that
5069                                 # could make --with-bdeps=y less effective if it is used to
5070                                 # adjust merge order to prevent built_with_use() calls from
5071                                 # failing.
5072                                 bdeps_satisfied = True
5073                         else:
5074                                 # built packages do not have build time dependencies.
5075                                 edepend["DEPEND"] = ""
5076
5077                 if removal_action and self.myopts.get("--with-bdeps", "y") == "n":
5078                         edepend["DEPEND"] = ""
5079
5080                 deps = (
5081                         ("/", edepend["DEPEND"],
5082                                 self._priority(buildtime=True, satisfied=bdeps_satisfied)),
5083                         (myroot, edepend["RDEPEND"], self._priority(runtime=True)),
5084                         (myroot, edepend["PDEPEND"], self._priority(runtime_post=True))
5085                 )
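                # For example (hypothetical scenario): when installing into
                # ROOT="/target", DEPEND atoms are still resolved against "/"
                # because build-time dependencies must run on the build host,
                # while RDEPEND and PDEPEND atoms are resolved against "/target"
                # with runtime and runtime_post priorities respectively.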
5086
5087                 debug = "--debug" in self.myopts
5088                 strict = mytype != "installed"
5089                 try:
5090                         for dep_root, dep_string, dep_priority in deps:
5091                                 if pkg.onlydeps:
5092                                         # Decrease priority so that --buildpkgonly
5093                                         # hasallzeros() works correctly.
5094                                         dep_priority = DepPriority()
5095                                 if not dep_string:
5096                                         continue
5097                                 if debug:
5098                                         print
5099                                         print "Parent:   ", jbigkey
5100                                         print "Depstring:", dep_string
5101                                         print "Priority:", dep_priority
5102                                 vardb = self.roots[dep_root].trees["vartree"].dbapi
5103                                 try:
5104                                         selected_atoms = self._select_atoms(dep_root,
5105                                                 dep_string, myuse=myuse, parent=pkg, strict=strict)
5106                                 except portage.exception.InvalidDependString, e:
5107                                         show_invalid_depstring_notice(jbigkey, dep_string, str(e))
5108                                         return 0
5109                                 if debug:
5110                                         print "Candidates:", selected_atoms
5111
5112                                 for atom in selected_atoms:
5113                                         try:
5114
5115                                                 atom = portage.dep.Atom(atom)
5116
5117                                                 mypriority = dep_priority.copy()
5118                                                 if not atom.blocker and vardb.match(atom):
5119                                                         mypriority.satisfied = True
5120
5121                                                 if not self._add_dep(Dependency(atom=atom,
5122                                                         blocker=atom.blocker, depth=depth, parent=pkg,
5123                                                         priority=mypriority, root=dep_root),
5124                                                         allow_unsatisfied=allow_unsatisfied):
5125                                                         return 0
5126
5127                                         except portage.exception.InvalidAtom, e:
5128                                                 show_invalid_depstring_notice(
5129                                                         pkg, dep_string, str(e))
5130                                                 del e
5131                                                 if not pkg.installed:
5132                                                         return 0
5133
5134                                 if debug:
5135                                         print "Exiting...", jbigkey
5136                 except portage.exception.AmbiguousPackageName, e:
5137                         pkgs = e.args[0]
5138                         portage.writemsg("\n\n!!! An atom in the dependencies " + \
5139                                 "is not fully-qualified. Multiple matches:\n\n", noiselevel=-1)
5140                         for cpv in pkgs:
5141                                 portage.writemsg("    %s\n" % cpv, noiselevel=-1)
5142                         portage.writemsg("\n", noiselevel=-1)
5143                         if mytype == "binary":
5144                                 portage.writemsg(
5145                                         "!!! This binary package cannot be installed: '%s'\n" % \
5146                                         mykey, noiselevel=-1)
5147                         elif mytype == "ebuild":
5148                                 portdb = self.roots[myroot].trees["porttree"].dbapi
5149                                 myebuild, mylocation = portdb.findname2(mykey)
5150                                 portage.writemsg("!!! This ebuild cannot be installed: " + \
5151                                         "'%s'\n" % myebuild, noiselevel=-1)
5152                         portage.writemsg("!!! Please notify the package maintainer " + \
5153                                 "that atoms must be fully-qualified.\n", noiselevel=-1)
5154                         return 0
5155                 return 1
5156
5157         def _priority(self, **kwargs):
5158                 if "remove" in self.myparams:
5159                         priority_constructor = UnmergeDepPriority
5160                 else:
5161                         priority_constructor = DepPriority
5162                 return priority_constructor(**kwargs)
5163
5164         def _dep_expand(self, root_config, atom_without_category):
5165                 """
5166                 @param root_config: a root config instance
5167                 @type root_config: RootConfig
5168                 @param atom_without_category: an atom without a category component
5169                 @type atom_without_category: String
5170                 @rtype: list
5171                 @returns: a list of atoms containing categories (possibly empty)
5172                 """
5173                 null_cp = portage.dep_getkey(insert_category_into_atom(
5174                         atom_without_category, "null"))
5175                 cat, atom_pn = portage.catsplit(null_cp)
5176
5177                 cp_set = set()
5178                 for db, pkg_type, built, installed, db_keys in \
5179                         self._filtered_trees[root_config.root]["dbs"]:
5180                         cp_set.update(db.cp_all())
5181                 for cp in list(cp_set):
5182                         cat, pn = portage.catsplit(cp)
5183                         if pn != atom_pn:
5184                                 cp_set.discard(cp)
5185                 deps = []
5186                 for cp in cp_set:
5187                         cat, pn = portage.catsplit(cp)
5188                         deps.append(insert_category_into_atom(
5189                                 atom_without_category, cat))
5190                 return deps
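                # Usage sketch (hypothetical package names): given trees that
                # contain app-shells/bash and dev-libs/expat,
                #
                #   self._dep_expand(root_config, "bash")      -> ["app-shells/bash"]
                #   self._dep_expand(root_config, ">=expat-2") -> [">=dev-libs/expat-2"]
                #   self._dep_expand(root_config, "nosuchpkg") -> []
                #
                # A name that exists in several categories yields one
                # category-qualified atom per match.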
5191
5192         def _have_new_virt(self, root, atom_cp):
5193                 ret = False
5194                 for db, pkg_type, built, installed, db_keys in \
5195                         self._filtered_trees[root]["dbs"]:
5196                         if db.cp_list(atom_cp):
5197                                 ret = True
5198                                 break
5199                 return ret
5200
5201         def _iter_atoms_for_pkg(self, pkg):
5202                 # TODO: add multiple $ROOT support
5203                 if pkg.root != self.target_root:
5204                         return
5205                 atom_arg_map = self._atom_arg_map
5206                 root_config = self.roots[pkg.root]
5207                 for atom in self._set_atoms.iterAtomsForPackage(pkg):
5208                         atom_cp = portage.dep_getkey(atom)
5209                         if atom_cp != pkg.cp and \
5210                                 self._have_new_virt(pkg.root, atom_cp):
5211                                 continue
5212                         visible_pkgs = root_config.visible_pkgs.match_pkgs(atom)
5213                         visible_pkgs.reverse() # descending order
5214                         higher_slot = None
5215                         for visible_pkg in visible_pkgs:
5216                                 if visible_pkg.cp != atom_cp:
5217                                         continue
5218                                 if pkg >= visible_pkg:
5219                                         # This is descending order, and we're not
5220                                         # interested in any versions <= pkg given.
5221                                         break
5222                                 if pkg.slot_atom != visible_pkg.slot_atom:
5223                                         higher_slot = visible_pkg
5224                                         break
5225                         if higher_slot is not None:
5226                                 continue
5227                         for arg in atom_arg_map[(atom, pkg.root)]:
5228                                 if isinstance(arg, PackageArg) and \
5229                                         arg.package != pkg:
5230                                         continue
5231                                 yield arg, atom
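                # Sketch of the values yielded above (hypothetical): if the
                # arguments included the set "world" and the atom "dev-libs/foo",
                # and pkg is dev-libs/foo-2, this generator could yield
                # (<SetArg world>, "dev-libs/foo") and
                # (<AtomArg dev-libs/foo>, "dev-libs/foo"), but it yields nothing
                # for an atom when a newer visible package of the same name is
                # available in a different SLOT, so argument atoms do not pin
                # packages against upgrades into new slots.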
5232
5233         def select_files(self, myfiles):
5234                 """Given a list of .tbz2s, .ebuilds, sets, and deps, create the
5235                 appropriate depgraph and return a favorite list."""
5236                 debug = "--debug" in self.myopts
5237                 root_config = self.roots[self.target_root]
5238                 sets = root_config.sets
5239                 getSetAtoms = root_config.setconfig.getSetAtoms
5240                 myfavorites=[]
5241                 myroot = self.target_root
5242                 dbs = self._filtered_trees[myroot]["dbs"]
5243                 vardb = self.trees[myroot]["vartree"].dbapi
5244                 real_vardb = self._trees_orig[myroot]["vartree"].dbapi
5245                 portdb = self.trees[myroot]["porttree"].dbapi
5246                 bindb = self.trees[myroot]["bintree"].dbapi
5247                 pkgsettings = self.pkgsettings[myroot]
5248                 args = []
5249                 onlydeps = "--onlydeps" in self.myopts
5250                 lookup_owners = []
5251                 for x in myfiles:
5252                         ext = os.path.splitext(x)[1]
5253                         if ext==".tbz2":
5254                                 if not os.path.exists(x):
5255                                         if os.path.exists(
5256                                                 os.path.join(pkgsettings["PKGDIR"], "All", x)):
5257                                                 x = os.path.join(pkgsettings["PKGDIR"], "All", x)
5258                                         elif os.path.exists(
5259                                                 os.path.join(pkgsettings["PKGDIR"], x)):
5260                                                 x = os.path.join(pkgsettings["PKGDIR"], x)
5261                                         else:
5262                                                 print "\n\n!!! Binary package '"+str(x)+"' does not exist."
5263                                                 print "!!! Please ensure the tbz2 exists as specified.\n"
5264                                                 return 0, myfavorites
5265                                 mytbz2=portage.xpak.tbz2(x)
5266                                 mykey=mytbz2.getelements("CATEGORY")[0]+"/"+os.path.splitext(os.path.basename(x))[0]
5267                                 if os.path.realpath(x) != \
5268                                         os.path.realpath(self.trees[myroot]["bintree"].getname(mykey)):
5269                                         print colorize("BAD", "\n*** You need to adjust PKGDIR to emerge this package.\n")
5270                                         return 0, myfavorites
5271                                 db_keys = list(bindb._aux_cache_keys)
5272                                 metadata = izip(db_keys, bindb.aux_get(mykey, db_keys))
5273                                 pkg = Package(type_name="binary", root_config=root_config,
5274                                         cpv=mykey, built=True, metadata=metadata,
5275                                         onlydeps=onlydeps)
5276                                 self._pkg_cache[pkg] = pkg
5277                                 args.append(PackageArg(arg=x, package=pkg,
5278                                         root_config=root_config))
5279                         elif ext==".ebuild":
5280                                 ebuild_path = portage.util.normalize_path(os.path.abspath(x))
5281                                 pkgdir = os.path.dirname(ebuild_path)
5282                                 tree_root = os.path.dirname(os.path.dirname(pkgdir))
5283                                 cp = pkgdir[len(tree_root)+1:]
5284                                 e = portage.exception.PackageNotFound(
5285                                         ("%s is not in a valid portage tree " + \
5286                                         "hierarchy or does not exist") % x)
5287                                 if not portage.isvalidatom(cp):
5288                                         raise e
5289                                 cat = portage.catsplit(cp)[0]
5290                                 mykey = cat + "/" + os.path.basename(ebuild_path[:-7])
5291                                 if not portage.isvalidatom("="+mykey):
5292                                         raise e
5293                                 ebuild_path = portdb.findname(mykey)
5294                                 if ebuild_path:
5295                                         if ebuild_path != os.path.join(os.path.realpath(tree_root),
5296                                                 cp, os.path.basename(ebuild_path)):
5297                                                 print colorize("BAD", "\n*** You need to adjust PORTDIR or PORTDIR_OVERLAY to emerge this package.\n")
5298                                                 return 0, myfavorites
5299                                         if mykey not in portdb.xmatch(
5300                                                 "match-visible", portage.dep_getkey(mykey)):
5301                                                 print colorize("BAD", "\n*** You are emerging a masked package. It is MUCH better to use")
5302                                                 print colorize("BAD", "*** /etc/portage/package.* to accomplish this. See portage(5) man")
5303                                                 print colorize("BAD", "*** page for details.")
5304                                                 countdown(int(self.settings["EMERGE_WARNING_DELAY"]),
5305                                                         "Continuing...")
5306                                 else:
5307                                         raise portage.exception.PackageNotFound(
5308                                                 "%s is not in a valid portage tree hierarchy or does not exist" % x)
5309                                 db_keys = list(portdb._aux_cache_keys)
5310                                 metadata = izip(db_keys, portdb.aux_get(mykey, db_keys))
5311                                 pkg = Package(type_name="ebuild", root_config=root_config,
5312                                         cpv=mykey, metadata=metadata, onlydeps=onlydeps)
5313                                 pkgsettings.setcpv(pkg)
5314                                 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
5315                                 self._pkg_cache[pkg] = pkg
5316                                 args.append(PackageArg(arg=x, package=pkg,
5317                                         root_config=root_config))
5318                         elif x.startswith(os.path.sep):
5319                                 if not x.startswith(myroot):
5320                                         portage.writemsg(("\n\n!!! '%s' does not start with" + \
5321                                                 " $ROOT.\n") % x, noiselevel=-1)
5322                                         return 0, []
5323                                 # Queue these up since it's most efficient to handle
5324                                 # multiple files in a single iter_owners() call.
5325                                 lookup_owners.append(x)
5326                         else:
5327                                 if x in ("system", "world"):
5328                                         x = SETPREFIX + x
5329                                 if x.startswith(SETPREFIX):
5330                                         s = x[len(SETPREFIX):]
5331                                         if s not in sets:
5332                                                 raise portage.exception.PackageSetNotFound(s)
5333                                         if s in self._sets:
5334                                                 continue
5335                                         # Recursively expand sets so that containment tests in
5336                                         # self._get_parent_sets() properly match atoms in nested
5337                                         # sets (like if world contains system).
5338                                         expanded_set = InternalPackageSet(
5339                                                 initial_atoms=getSetAtoms(s))
5340                                         self._sets[s] = expanded_set
5341                                         args.append(SetArg(arg=x, set=expanded_set,
5342                                                 root_config=root_config))
5343                                         myfavorites.append(x)
5344                                         continue
5345                                 if not is_valid_package_atom(x):
5346                                         portage.writemsg("\n\n!!! '%s' is not a valid package atom.\n" % x,
5347                                                 noiselevel=-1)
5348                                         portage.writemsg("!!! Please check ebuild(5) for full details.\n")
5349                                         portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
5350                                         return (0,[])
5351                                 # Don't expand categories or old-style virtuals here unless
5352                                 # necessary. Expansion of old-style virtuals here causes at
5353                                 # least the following problems:
5354                                 #   1) It's more difficult to determine which set(s) an atom
5355                                 #      came from, if any.
5356                                 #   2) It takes away freedom from the resolver to choose other
5357                                 #      possible expansions when necessary.
5358                                 if "/" in x:
5359                                         args.append(AtomArg(arg=x, atom=x,
5360                                                 root_config=root_config))
5361                                         continue
5362                                 expanded_atoms = self._dep_expand(root_config, x)
5363                                 installed_cp_set = set()
5364                                 for atom in expanded_atoms:
5365                                         atom_cp = portage.dep_getkey(atom)
5366                                         if vardb.cp_list(atom_cp):
5367                                                 installed_cp_set.add(atom_cp)
5368                                 if len(expanded_atoms) > 1 and len(installed_cp_set) == 1:
5369                                         installed_cp = iter(installed_cp_set).next()
5370                                         expanded_atoms = [atom for atom in expanded_atoms \
5371                                                 if portage.dep_getkey(atom) == installed_cp]
5372
5373                                 if len(expanded_atoms) > 1:
5374                                         print
5375                                         print
5376                                         ambiguous_package_name(x, expanded_atoms, root_config,
5377                                                 self.spinner, self.myopts)
5378                                         return False, myfavorites
5379                                 if expanded_atoms:
5380                                         atom = expanded_atoms[0]
5381                                 else:
5382                                         null_atom = insert_category_into_atom(x, "null")
5383                                         null_cp = portage.dep_getkey(null_atom)
5384                                         cat, atom_pn = portage.catsplit(null_cp)
5385                                         virts_p = root_config.settings.get_virts_p().get(atom_pn)
5386                                         if virts_p:
5387                                                 # Allow the depgraph to choose which virtual.
5388                                                 atom = insert_category_into_atom(x, "virtual")
5389                                         else:
5390                                                 atom = insert_category_into_atom(x, "null")
5391
5392                                 args.append(AtomArg(arg=x, atom=atom,
5393                                         root_config=root_config))
5394
5395                 if lookup_owners:
5396                         relative_paths = []
5397                         search_for_multiple = False
5398                         if len(lookup_owners) > 1:
5399                                 search_for_multiple = True
5400
5401                         for x in lookup_owners:
5402                                 if not search_for_multiple and os.path.isdir(x):
5403                                         search_for_multiple = True
5404                                 relative_paths.append(x[len(myroot):])
5405
5406                         owners = set()
5407                         for pkg, relative_path in \
5408                                 real_vardb._owners.iter_owners(relative_paths):
5409                                 owners.add(pkg.mycpv)
5410                                 if not search_for_multiple:
5411                                         break
5412
5413                         if not owners:
5414                                 portage.writemsg(("\n\n!!! '%s' is not claimed " + \
5415                                         "by any package.\n") % lookup_owners[0], noiselevel=-1)
5416                                 return 0, []
5417
5418                         for cpv in owners:
5419                                 slot = vardb.aux_get(cpv, ["SLOT"])[0]
5420                                 if not slot:
5421                                         # portage now masks packages with missing slot, but it's
5422                                         # possible that one was installed by an older version
5423                                         atom = portage.cpv_getkey(cpv)
5424                                 else:
5425                                         atom = "%s:%s" % (portage.cpv_getkey(cpv), slot)
5426                                 args.append(AtomArg(arg=atom, atom=atom,
5427                                         root_config=root_config))
5428
5429                 if "--update" in self.myopts:
5430                         # Enable greedy SLOT atoms for atoms given as arguments.
5431                         # This is currently disabled for sets since greedy SLOT
5432                         # atoms could be a property of the set itself.
5433                         greedy_atoms = []
5434                         for arg in args:
5435                                 # In addition to any installed slots, also try to pull
5436                                 # in the latest new slot that may be available.
5437                                 greedy_atoms.append(arg)
5438                                 if not isinstance(arg, (AtomArg, PackageArg)):
5439                                         continue
5440                                 atom_cp = portage.dep_getkey(arg.atom)
5441                                 slots = set()
5442                                 for cpv in vardb.match(arg.atom):
5443                                         slots.add(vardb.aux_get(cpv, ["SLOT"])[0])
5444                                 for slot in slots:
5445                                         greedy_atoms.append(
5446                                                 AtomArg(arg=arg.arg, atom="%s:%s" % (atom_cp, slot),
5447                                                         root_config=root_config))
5448                         args = greedy_atoms
5449                         del greedy_atoms
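                        # Sketch with hypothetical values: for the argument atom
                        # "dev-lang/python" with installed SLOTs "2.4" and "2.5",
                        # the loop above appends extra AtomArgs for
                        # "dev-lang/python:2.4" and "dev-lang/python:2.5", so an
                        # update is considered for every installed slot.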
5450
5451                 # Create the "args" package set from atoms and
5452                 # packages given as arguments.
5453                 args_set = self._sets["args"]
5454                 for arg in args:
5455                         if not isinstance(arg, (AtomArg, PackageArg)):
5456                                 continue
5457                         myatom = arg.atom
5458                         if myatom in args_set:
5459                                 continue
5460                         args_set.add(myatom)
5461                         myfavorites.append(myatom)
5462                 self._set_atoms.update(chain(*self._sets.itervalues()))
5463                 atom_arg_map = self._atom_arg_map
5464                 for arg in args:
5465                         for atom in arg.set:
5466                                 atom_key = (atom, myroot)
5467                                 refs = atom_arg_map.get(atom_key)
5468                                 if refs is None:
5469                                         refs = []
5470                                         atom_arg_map[atom_key] = refs
5471                                 if arg not in refs:
5472                                         refs.append(arg)
5473                 pprovideddict = pkgsettings.pprovideddict
5474                 if debug:
5475                         portage.writemsg("\n", noiselevel=-1)
5476                 # Order needs to be preserved since a feature of --nodeps
5477                 # is to allow the user to force a specific merge order.
5478                 args.reverse()
5479                 while args:
5480                         arg = args.pop()
5481                         for atom in arg.set:
5482                                 self.spinner.update()
5483                                 dep = Dependency(atom=atom, onlydeps=onlydeps,
5484                                         root=myroot, parent=arg)
5485                                 atom_cp = portage.dep_getkey(atom)
5486                                 try:
5487                                         pprovided = pprovideddict.get(portage.dep_getkey(atom))
5488                                         if pprovided and portage.match_from_list(atom, pprovided):
5489                                                 # A provided package has been specified on the command line.
5490                                                 self._pprovided_args.append((arg, atom))
5491                                                 continue
5492                                         if isinstance(arg, PackageArg):
5493                                                 if not self._add_pkg(arg.package, dep) or \
5494                                                         not self._create_graph():
5495                                                         sys.stderr.write(("\n\n!!! Problem resolving " + \
5496                                                                 "dependencies for %s\n") % arg.arg)
5497                                                         return 0, myfavorites
5498                                                 continue
5499                                         if debug:
5500                                                 portage.writemsg("      Arg: %s\n     Atom: %s\n" % \
5501                                                         (arg, atom), noiselevel=-1)
5502                                         pkg, existing_node = self._select_package(
5503                                                 myroot, atom, onlydeps=onlydeps)
5504                                         if not pkg:
5505                                                 if not (isinstance(arg, SetArg) and \
5506                                                         arg.name in ("system", "world")):
5507                                                         self._unsatisfied_deps_for_display.append(
5508                                                                 ((myroot, atom), {}))
5509                                                         return 0, myfavorites
5510                                                 self._missing_args.append((arg, atom))
5511                                                 continue
5512                                         if atom_cp != pkg.cp:
5513                                                 # For old-style virtuals, we need to repeat the
5514                                                 # package.provided check against the selected package.
5515                                                 expanded_atom = atom.replace(atom_cp, pkg.cp)
5516                                                 pprovided = pprovideddict.get(pkg.cp)
5517                                                 if pprovided and \
5518                                                         portage.match_from_list(expanded_atom, pprovided):
5519                                                         # A provided package has been
5520                                                         # specified on the command line.
5521                                                         self._pprovided_args.append((arg, atom))
5522                                                         continue
5523                                         if pkg.installed and "selective" not in self.myparams:
5524                                                 self._unsatisfied_deps_for_display.append(
5525                                                         ((myroot, atom), {}))
5526                                                 # Previous behavior was to bail out in this case, but
5527                                                 # since the dep is satisfied by the installed package,
5528                                                 # it's more friendly to continue building the graph
5529                                                 # and just show a warning message. Therefore, only bail
5530                                                 # out here if the atom is not from either the system or
5531                                                 # world set.
5532                                                 if not (isinstance(arg, SetArg) and \
5533                                                         arg.name in ("system", "world")):
5534                                                         return 0, myfavorites
5535
5536                                         # Add the selected package to the graph as soon as possible
5537                                         # so that later dep_check() calls can use it as feedback
5538                                         # for making more consistent atom selections.
5539                                         if not self._add_pkg(pkg, dep):
5540                                                 if isinstance(arg, SetArg):
5541                                                         sys.stderr.write(("\n\n!!! Problem resolving " + \
5542                                                                 "dependencies for %s from %s\n") % \
5543                                                                 (atom, arg.arg))
5544                                                 else:
5545                                                         sys.stderr.write(("\n\n!!! Problem resolving " + \
5546                                                                 "dependencies for %s\n") % atom)
5547                                                 return 0, myfavorites
5548
5549                                 except portage.exception.MissingSignature, e:
5550                                         portage.writemsg("\n\n!!! A missing gpg signature is preventing portage from calculating the\n")
5551                                         portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
5552                                         portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
5553                                         portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
5554                                         portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
5555                                         return 0, myfavorites
5556                                 except portage.exception.InvalidSignature, e:
5557                                         portage.writemsg("\n\n!!! An invalid gpg signature is preventing portage from calculating the\n")
5558                                         portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
5559                                         portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
5560                                         portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
5561                                         portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
5562                                         return 0, myfavorites
5563                                 except SystemExit, e:
5564                                         raise # Re-raise so that SystemExit is not swallowed below
5565                                 except Exception, e:
5566                                         print >> sys.stderr, "\n\n!!! Problem in '%s' dependencies." % atom
5567                                         print >> sys.stderr, "!!!", str(e), getattr(e, "__module__", None)
5568                                         raise
5569
5570                 # Now that the root packages have been added to the graph,
5571                 # process the dependencies.
5572                 if not self._create_graph():
5573                         return 0, myfavorites
5574
5575                 missing=0
5576                 if "--usepkgonly" in self.myopts:
5577                         for xs in self.digraph.all_nodes():
5578                                 if not isinstance(xs, Package):
5579                                         continue
5580                                 if len(xs) >= 4 and xs[0] != "binary" and xs[3] == "merge":
5581                                         if missing == 0:
5582                                                 print
5583                                         missing += 1
5584                                         print "Missing binary for:",xs[2]
5585
5586                 try:
5587                         self.altlist()
5588                 except self._unknown_internal_error:
5589                         return False, myfavorites
5590
5591                 # Succeed unless one or more binary packages are missing.
5592                 return (not missing, myfavorites)
5593
5594         def _select_atoms_from_graph(self, *pargs, **kwargs):
5595                 """
5596                 Prefer atoms matching packages that have already been
5597                 added to the graph or those that are installed and have
5598                 not been scheduled for replacement.
5599                 """
5600                 kwargs["trees"] = self._graph_trees
5601                 return self._select_atoms_highest_available(*pargs, **kwargs)
5602
5603         def _select_atoms_highest_available(self, root, depstring,
5604                 myuse=None, parent=None, strict=True, trees=None):
5605                 """This will raise InvalidDependString if necessary. If trees is
5606                 None then self._filtered_trees is used."""
5607                 pkgsettings = self.pkgsettings[root]
5608                 if trees is None:
5609                         trees = self._filtered_trees
5610                 if True:
5611                         try:
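                                     # Expose the parent package to dep_check() via the trees
                                     # dict so that atom selection can take the parent into
                                     # account; it is removed again in the finally block.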
5612                                 if parent is not None:
5613                                         trees[root]["parent"] = parent
5614                                 if not strict:
5615                                         portage.dep._dep_check_strict = False
5616                                 mycheck = portage.dep_check(depstring, None,
5617                                         pkgsettings, myuse=myuse,
5618                                         myroot=root, trees=trees)
5619                         finally:
5620                                 if parent is not None:
5621                                         trees[root].pop("parent")
5622                                 portage.dep._dep_check_strict = True
5623                         if not mycheck[0]:
5624                                 raise portage.exception.InvalidDependString(mycheck[1])
5625                         selected_atoms = mycheck[1]
5626                 return selected_atoms
5627
5628         def _show_unsatisfied_dep(self, root, atom, myparent=None, arg=None):
5629                 atom = portage.dep.Atom(atom)
5630                 atom_set = InternalPackageSet(initial_atoms=(atom,))
5631                 atom_without_use = atom
5632                 if atom.use:
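                             # remove_slot() strips both the slot and any USE deps, so
                             # the slot is re-appended below to discard only the USE part.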
5633                         atom_without_use = portage.dep.remove_slot(atom)
5634                         if atom.slot:
5635                                 atom_without_use += ":" + atom.slot
5636                         atom_without_use = portage.dep.Atom(atom_without_use)
5637                 xinfo = '"%s"' % atom
5638                 if arg:
5639                         xinfo='"%s"' % arg
5640                 # Discard null/ from failed cpv_expand category expansion.
5641                 xinfo = xinfo.replace("null/", "")
5642                 masked_packages = []
5643                 missing_use = []
5644                 missing_licenses = []
5645                 have_eapi_mask = False
5646                 pkgsettings = self.pkgsettings[root]
5647                 implicit_iuse = pkgsettings._get_implicit_iuse()
5648                 root_config = self.roots[root]
5649                 portdb = self.roots[root].trees["porttree"].dbapi
5650                 dbs = self._filtered_trees[root]["dbs"]
5651                 for db, pkg_type, built, installed, db_keys in dbs:
5652                         if installed:
5653                                 continue
5654                         match = db.match
5655                         if hasattr(db, "xmatch"):
5656                                 cpv_list = db.xmatch("match-all", atom_without_use)
5657                         else:
5658                                 cpv_list = db.match(atom_without_use)
5659                         # descending order
5660                         cpv_list.reverse()
5661                         for cpv in cpv_list:
5662                                 metadata, mreasons  = get_mask_info(root_config, cpv,
5663                                         pkgsettings, db, pkg_type, built, installed, db_keys)
5664                                 if metadata is not None:
5665                                         pkg = Package(built=built, cpv=cpv,
5666                                                 installed=installed, metadata=metadata,
5667                                                 root_config=root_config)
5668                                         if pkg.cp != atom.cp:
5669                                                 # A cpv can be returned from dbapi.match() as an
5670                                                 # old-style virtual match even in cases when the
5671                                                 # package does not actually PROVIDE the virtual.
5672                                                 # Filter out any such false matches here.
5673                                                 if not atom_set.findAtomForPackage(pkg):
5674                                                         continue
5675                                         if atom.use and not mreasons:
5676                                                 missing_use.append(pkg)
5677                                                 continue
5678                                 masked_packages.append(
5679                                         (root_config, pkgsettings, cpv, metadata, mreasons))
5680
5681                 missing_use_reasons = []
5682                 missing_iuse_reasons = []
5683                 for pkg in missing_use:
5684                         use = pkg.use.enabled
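                             # Build a regex of every flag the package can accept
                             # (implicit IUSE plus its explicit IUSE) so that USE deps
                             # referencing unknown flags can be reported as missing IUSE.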
5685                         iuse = implicit_iuse.union(re.escape(x) for x in pkg.iuse.all)
5686                         iuse_re = re.compile("^(%s)$" % "|".join(iuse))
5687                         missing_iuse = []
5688                         for x in atom.use.required:
5689                                 if iuse_re.match(x) is None:
5690                                         missing_iuse.append(x)
5691                         mreasons = []
5692                         if missing_iuse:
5693                                 mreasons.append("Missing IUSE: %s" % " ".join(missing_iuse))
5694                                 missing_iuse_reasons.append((pkg, mreasons))
5695                         else:
5696                                 need_enable = sorted(atom.use.enabled.difference(use))
5697                                 need_disable = sorted(atom.use.disabled.intersection(use))
5698                                 if need_enable or need_disable:
5699                                         changes = []
5700                                         changes.extend(colorize("red", "+" + x) \
5701                                                 for x in need_enable)
5702                                         changes.extend(colorize("blue", "-" + x) \
5703                                                 for x in need_disable)
5704                                         mreasons.append("Change USE: %s" % " ".join(changes))
5705                                         missing_use_reasons.append((pkg, mreasons))
5706
5707                 if missing_iuse_reasons and not missing_use_reasons:
5708                         missing_use_reasons = missing_iuse_reasons
5709                 elif missing_use_reasons:
5710                         # Only show the latest version.
5711                         del missing_use_reasons[1:]
5712
5713                 if missing_use_reasons:
5714                         print "\nemerge: there are no ebuilds built with USE flags to satisfy "+green(xinfo)+"."
5715                         print "!!! One of the following packages is required to complete your request:"
5716                         for pkg, mreasons in missing_use_reasons:
5717                                 print "- "+pkg.cpv+" ("+", ".join(mreasons)+")"
5718
5719                 elif masked_packages:
5720                         print "\n!!! " + \
5721                                 colorize("BAD", "All ebuilds that could satisfy ") + \
5722                                 colorize("INFORM", xinfo) + \
5723                                 colorize("BAD", " have been masked.")
5724                         print "!!! One of the following masked packages is required to complete your request:"
5725                         have_eapi_mask = show_masked_packages(masked_packages)
5726                         if have_eapi_mask:
5727                                 print
5728                                 msg = ("The current version of portage supports " + \
5729                                         "EAPI '%s'. You must upgrade to a newer version" + \
5730                                         " of portage before EAPI masked packages can" + \
5731                                         " be installed.") % portage.const.EAPI
5732                                 from textwrap import wrap
5733                                 for line in wrap(msg, 75):
5734                                         print line
5735                         print
5736                         show_mask_docs()
5737                 else:
5738                         print "\nemerge: there are no ebuilds to satisfy "+green(xinfo)+"."
5739
5740                 # Show parent nodes and the argument that pulled them in.
5741                 traversed_nodes = set()
5742                 node = myparent
5743                 msg = []
5744                 while node is not None:
5745                         traversed_nodes.add(node)
5746                         msg.append('(dependency required by "%s" [%s])' % \
5747                                 (colorize('INFORM', str(node.cpv)), node.type_name))
5748                         # When traversing to parents, prefer arguments over packages
5749                         # since arguments are root nodes. Never traverse the same
5750                         # package twice, in order to prevent an infinite loop.
5751                         selected_parent = None
5752                         for parent in self.digraph.parent_nodes(node):
5753                                 if isinstance(parent, DependencyArg):
5754                                         msg.append('(dependency required by "%s" [argument])' % \
5755                                                 (colorize('INFORM', str(parent))))
5756                                         selected_parent = None
5757                                         break
5758                                 if parent not in traversed_nodes:
5759                                         selected_parent = parent
5760                         node = selected_parent
5761                 for line in msg:
5762                         print line
5763
5764                 print
5765
5766         def _select_pkg_highest_available(self, root, atom, onlydeps=False):
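                     # Memoize results per (root, atom, onlydeps) since the same
                     # atom is typically evaluated many times while the graph is
                     # being built.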
5767                 cache_key = (root, atom, onlydeps)
5768                 ret = self._highest_pkg_cache.get(cache_key)
5769                 if ret is not None:
5770                         pkg, existing = ret
5771                         if pkg and not existing:
5772                                 existing = self._slot_pkg_map[root].get(pkg.slot_atom)
5773                                 if existing and existing == pkg:
5774                                         # Update the cache to reflect that the
5775                                         # package has been added to the graph.
5776                                         ret = pkg, pkg
5777                                         self._highest_pkg_cache[cache_key] = ret
5778                         return ret
5779                 ret = self._select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)
5780                 self._highest_pkg_cache[cache_key] = ret
5781                 pkg, existing = ret
5782                 if pkg is not None:
5783                         settings = pkg.root_config.settings
5784                         if visible(settings, pkg) and not (pkg.installed and \
5785                                 settings._getMissingKeywords(pkg.cpv, pkg.metadata)):
5786                                 pkg.root_config.visible_pkgs.cpv_inject(pkg)
5787                 return ret
5788
5789         def _select_pkg_highest_available_imp(self, root, atom, onlydeps=False):
5790                 root_config = self.roots[root]
5791                 pkgsettings = self.pkgsettings[root]
5792                 dbs = self._filtered_trees[root]["dbs"]
5793                 vardb = self.roots[root].trees["vartree"].dbapi
5794                 portdb = self.roots[root].trees["porttree"].dbapi
5795                 # List of acceptable packages, ordered by type preference.
5796                 matched_packages = []
5797                 highest_version = None
5798                 if not isinstance(atom, portage.dep.Atom):
5799                         atom = portage.dep.Atom(atom)
5800                 atom_cp = atom.cp
5801                 atom_set = InternalPackageSet(initial_atoms=(atom,))
5802                 existing_node = None
5803                 myeb = None
5804                 usepkgonly = "--usepkgonly" in self.myopts
5805                 empty = "empty" in self.myparams
5806                 selective = "selective" in self.myparams
5807                 reinstall = False
5808                 noreplace = "--noreplace" in self.myopts
5809                 # Behavior of the "selective" parameter depends on
5810                 # whether or not a package matches an argument atom.
5811                 # If an installed package provides an old-style
5812                 # virtual that is no longer provided by an available
5813                 # package, the installed package may match an argument
5814                 # atom even though none of the available packages do.
5815                 # Therefore, "selective" logic does not consider
5816                 # whether or not an installed package matches an
5817                 # argument atom. It only considers whether or not
5818                 # available packages match argument atoms, which is
5819                 # represented by the found_available_arg flag.
5820                 found_available_arg = False
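                     # Two passes: the first prefers a package that already has a
                     # node in the graph for the matching slot, and the second
                     # falls back to selecting a new package from the databases.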
5821                 for find_existing_node in True, False:
5822                         if existing_node:
5823                                 break
5824                         for db, pkg_type, built, installed, db_keys in dbs:
5825                                 if existing_node:
5826                                         break
5827                                 if installed and not find_existing_node:
5828                                         want_reinstall = reinstall or empty or \
5829                                                 (found_available_arg and not selective)
5830                                         if want_reinstall and matched_packages:
5831                                                 continue
5832                                 if hasattr(db, "xmatch"):
5833                                         cpv_list = db.xmatch("match-all", atom)
5834                                 else:
5835                                         cpv_list = db.match(atom)
5836
5837                                 # USE=multislot can make an installed package appear as if
5838                                 # it doesn't satisfy a slot dependency. Rebuilding the ebuild
5839                                 # won't do any good as long as USE=multislot is enabled since
5840                                 # the newly built package still won't have the expected slot.
5841                                 # Therefore, assume that such SLOT dependencies are already
5842                                 # satisfied rather than forcing a rebuild.
5843                                 if installed and not cpv_list and atom.slot:
5844                                         for cpv in db.match(atom.cp):
5845                                                 slot_available = False
5846                                                 for other_db, other_type, other_built, \
5847                                                         other_installed, other_keys in dbs:
5848                                                         try:
5849                                                                 if atom.slot == \
5850                                                                         other_db.aux_get(cpv, ["SLOT"])[0]:
5851                                                                         slot_available = True
5852                                                                         break
5853                                                         except KeyError:
5854                                                                 pass
5855                                                 if not slot_available:
5856                                                         continue
5857                                                 inst_pkg = self._pkg(cpv, "installed",
5858                                                         root_config, installed=installed)
5859                                                 # Remove the slot from the atom and verify that
5860                                                 # the package matches the resulting atom.
5861                                                 atom_without_slot = portage.dep.remove_slot(atom)
5862                                                 if atom.use:
5863                                                         atom_without_slot += str(atom.use)
5864                                                 atom_without_slot = portage.dep.Atom(atom_without_slot)
5865                                                 if portage.match_from_list(
5866                                                         atom_without_slot, [inst_pkg]):
5867                                                         cpv_list = [inst_pkg.cpv]
5868                                                 break
5869
5870                                 if not cpv_list:
5871                                         continue
5872                                 pkg_status = "merge"
5873                                 if installed or onlydeps:
5874                                         pkg_status = "nomerge"
5875                                 # descending order
5876                                 cpv_list.reverse()
5877                                 for cpv in cpv_list:
5878                                         # Make --noreplace take precedence over --newuse.
5879                                         if not installed and noreplace and \
5880                                                 cpv in vardb.match(atom):
5881                                                 # If the installed version is masked, it may
5882                                                 # be necessary to look at lower versions,
5883                                                 # in case there is a visible downgrade.
5884                                                 continue
5885                                         reinstall_for_flags = None
5886                                         cache_key = (pkg_type, root, cpv, pkg_status)
5887                                         calculated_use = True
5888                                         pkg = self._pkg_cache.get(cache_key)
5889                                         if pkg is None:
5890                                                 calculated_use = False
5891                                                 try:
5892                                                         metadata = izip(db_keys, db.aux_get(cpv, db_keys))
5893                                                 except KeyError:
5894                                                         continue
5895                                                 pkg = Package(built=built, cpv=cpv,
5896                                                         installed=installed, metadata=metadata,
5897                                                         onlydeps=onlydeps, root_config=root_config,
5898                                                         type_name=pkg_type)
5899                                                 metadata = pkg.metadata
5900                                                 if not built and ("?" in metadata["LICENSE"] or \
5901                                                         "?" in metadata["PROVIDE"]):
5902                                                         # This is avoided whenever possible because
5903                                                         # it's expensive. It only needs to be done here
5904                                                         # if it has an effect on visibility.
5905                                                         pkgsettings.setcpv(pkg)
5906                                                         metadata["USE"] = pkgsettings["PORTAGE_USE"]
5907                                                         calculated_use = True
5908                                                 self._pkg_cache[pkg] = pkg
5909
5910                                         if not installed or (built and matched_packages):
5911                                                 # Only enforce visibility on installed packages
5912                                                 # if there is at least one other visible package
5913                                                 # available. By filtering installed masked packages
5914                                                 # here, packages that have been masked since they
5915                                                 # were installed can be automatically downgraded
5916                                                 # to an unmasked version.
5917                                                 try:
5918                                                         if not visible(pkgsettings, pkg):
5919                                                                 continue
5920                                                 except portage.exception.InvalidDependString:
5921                                                         if not installed:
5922                                                                 continue
5923
5924                                                 # Enable upgrade or downgrade to a version
5925                                                 # with visible KEYWORDS when the installed
5926                                                 # version is masked by KEYWORDS, but never
5927                                                 # reinstall the same exact version only due
5928                                                 # to a KEYWORDS mask.
5929                                                 if built and matched_packages:
5930
5931                                                         different_version = None
5932                                                         for avail_pkg in matched_packages:
5933                                                                 if not portage.dep.cpvequal(
5934                                                                         pkg.cpv, avail_pkg.cpv):
5935                                                                         different_version = avail_pkg
5936                                                                         break
5937                                                         if different_version is not None:
5938
5939                                                                 if installed and \
5940                                                                         pkgsettings._getMissingKeywords(
5941                                                                         pkg.cpv, pkg.metadata):
5942                                                                         continue
5943
5944                                                                 # If the ebuild no longer exists or its
5945                                                                 # keywords have been dropped, reject built
5946                                                                 # instances (installed or binary).
5947                                                                 # If --usepkgonly is enabled, assume that
5948                                                                 # the ebuild status should be ignored.
5949                                                                 if not usepkgonly:
5950                                                                         try:
5951                                                                                 pkg_eb = self._pkg(
5952                                                                                         pkg.cpv, "ebuild", root_config)
5953                                                                         except portage.exception.PackageNotFound:
5954                                                                                 continue
5955                                                                         else:
5956                                                                                 if not visible(pkgsettings, pkg_eb):
5957                                                                                         continue
5958
5959                                         if not pkg.built and not calculated_use:
5960                                                 # This is avoided whenever possible because
5961                                                 # it's expensive.
5962                                                 pkgsettings.setcpv(pkg)
5963                                                 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
5964
5965                                         if pkg.cp != atom.cp:
5966                                                 # A cpv can be returned from dbapi.match() as an
5967                                                 # old-style virtual match even in cases when the
5968                                                 # package does not actually PROVIDE the virtual.
5969                                                 # Filter out any such false matches here.
5970                                                 if not atom_set.findAtomForPackage(pkg):
5971                                                         continue
5972
5973                                         myarg = None
5974                                         if root == self.target_root:
5975                                                 try:
5976                                                         # Ebuild USE must have been calculated prior
5977                                                         # to this point, in case atoms have USE deps.
5978                                                         myarg = self._iter_atoms_for_pkg(pkg).next()
5979                                                 except StopIteration:
5980                                                         pass
5981                                                 except portage.exception.InvalidDependString:
5982                                                         if not installed:
5983                                                                 # masked by corruption
5984                                                                 continue
5985                                         if not installed and myarg:
5986                                                 found_available_arg = True
5987
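                                             # Enforce USE deps from the atom for unbuilt ebuilds:
                                             # skip candidates whose calculated USE does not satisfy
                                             # the required enabled/disabled flags.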
5988                                         if atom.use and not pkg.built:
5989                                                 use = pkg.use.enabled
5990                                                 if atom.use.enabled.difference(use):
5991                                                         continue
5992                                                 if atom.use.disabled.intersection(use):
5993                                                         continue
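                                             # Track the highest visible version for this cp; it is
                                             # used below to decide whether an existing node in a
                                             # different slot should be ignored.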
5994                                         if pkg.cp == atom_cp:
5995                                                 if highest_version is None:
5996                                                         highest_version = pkg
5997                                                 elif pkg > highest_version:
5998                                                         highest_version = pkg
5999                                         # At this point, we've found the highest visible
6000                                         # match from the current repo. Any lower versions
6001                                         # from this repo are ignored, so the loop
6002                                         # will always end with a break statement below
6003                                         # this point.
6004                                         if find_existing_node:
6005                                                 e_pkg = self._slot_pkg_map[root].get(pkg.slot_atom)
6006                                                 if not e_pkg:
6007                                                         break
6008                                                 if portage.dep.match_from_list(atom, [e_pkg]):
6009                                                         if highest_version and \
6010                                                                 e_pkg.cp == atom_cp and \
6011                                                                 e_pkg < highest_version and \
6012                                                                 e_pkg.slot_atom != highest_version.slot_atom:
6013                                                                 # There is a higher version available in a
6014                                                                 # different slot, so this existing node is
6015                                                                 # irrelevant.
6016                                                                 pass
6017                                                         else:
6018                                                                 matched_packages.append(e_pkg)
6019                                                                 existing_node = e_pkg
6020                                                 break
6021                                         # Compare built package to current config and
6022                                         # reject the built package if necessary.
6023                                         if built and not installed and \
6024                                                 ("--newuse" in self.myopts or \
6025                                                 "--reinstall" in self.myopts):
6026                                                 iuses = pkg.iuse.all
6027                                                 old_use = pkg.use.enabled
6028                                                 if myeb:
6029                                                         pkgsettings.setcpv(myeb)
6030                                                 else:
6031                                                         pkgsettings.setcpv(pkg)
6032                                                 now_use = pkgsettings["PORTAGE_USE"].split()
6033                                                 forced_flags = set()
6034                                                 forced_flags.update(pkgsettings.useforce)
6035                                                 forced_flags.update(pkgsettings.usemask)
6036                                                 cur_iuse = iuses
6037                                                 if myeb and not usepkgonly:
6038                                                         cur_iuse = myeb.iuse.all
6039                                                 if self._reinstall_for_flags(forced_flags,
6040                                                         old_use, iuses,
6041                                                         now_use, cur_iuse):
6042                                                         break
6043                                         # Compare current config to installed package
6044                                         # and do not reinstall if possible.
6045                                         if not installed and \
6046                                                 ("--newuse" in self.myopts or \
6047                                                 "--reinstall" in self.myopts) and \
6048                                                 cpv in vardb.match(atom):
6049                                                 pkgsettings.setcpv(pkg)
6050                                                 forced_flags = set()
6051                                                 forced_flags.update(pkgsettings.useforce)
6052                                                 forced_flags.update(pkgsettings.usemask)
6053                                                 old_use = vardb.aux_get(cpv, ["USE"])[0].split()
6054                                                 old_iuse = set(filter_iuse_defaults(
6055                                                         vardb.aux_get(cpv, ["IUSE"])[0].split()))
6056                                                 cur_use = pkgsettings["PORTAGE_USE"].split()
6057                                                 cur_iuse = pkg.iuse.all
6058                                                 reinstall_for_flags = \
6059                                                         self._reinstall_for_flags(
6060                                                         forced_flags, old_use, old_iuse,
6061                                                         cur_use, cur_iuse)
6062                                                 if reinstall_for_flags:
6063                                                         reinstall = True
6064                                         if not built:
6065                                                 myeb = pkg
6066                                         matched_packages.append(pkg)
6067                                         if reinstall_for_flags:
6068                                                 self._reinstall_nodes[pkg] = \
6069                                                         reinstall_for_flags
6070                                         break
6071
6072                 if not matched_packages:
6073                         return None, None
6074
6075                 if "--debug" in self.myopts:
6076                         for pkg in matched_packages:
6077                                 portage.writemsg("%s %s\n" % \
6078                                         ((pkg.type_name + ":").rjust(10), pkg.cpv), noiselevel=-1)
6079
6080                 # Filter out any old-style virtual matches if they are
6081                 # mixed with new-style virtual matches.
6082                 cp = portage.dep_getkey(atom)
6083                 if len(matched_packages) > 1 and \
6084                         "virtual" == portage.catsplit(cp)[0]:
6085                         for pkg in matched_packages:
6086                                 if pkg.cp != cp:
6087                                         continue
6088                                 # Got a new-style virtual, so filter
6089                                 # out any old-style virtuals.
6090                                 matched_packages = [pkg for pkg in matched_packages \
6091                                         if pkg.cp == cp]
6092                                 break
6093
6094                 if len(matched_packages) > 1:
6095                         bestmatch = portage.best(
6096                                 [pkg.cpv for pkg in matched_packages])
6097                         matched_packages = [pkg for pkg in matched_packages \
6098                                 if portage.dep.cpvequal(pkg.cpv, bestmatch)]
6099
6100                 # ordered by type preference ("ebuild" type is the last resort)
6101                 return  matched_packages[-1], existing_node
6102
6103         def _select_pkg_from_graph(self, root, atom, onlydeps=False):
6104                 """
6105                 Select packages that have already been added to the graph or
6106                 those that are installed and have not been scheduled for
6107                 replacement.
6108                 """
6109                 graph_db = self._graph_trees[root]["porttree"].dbapi
6110                 matches = graph_db.match(atom)
6111                 if not matches:
6112                         return None, None
6113                 cpv = matches[-1] # highest match
6114                 slot_atom = "%s:%s" % (portage.cpv_getkey(cpv),
6115                         graph_db.aux_get(cpv, ["SLOT"])[0])
6116                 e_pkg = self._slot_pkg_map[root].get(slot_atom)
6117                 if e_pkg:
6118                         return e_pkg, e_pkg
6119                 # Since this cpv exists in the graph_db,
6120                 # we must have a cached Package instance.
6121                 cache_key = ("installed", root, cpv, "nomerge")
6122                 return (self._pkg_cache[cache_key], None)
6123
6124         def _complete_graph(self):
6125                 """
6126                 Add any deep dependencies of required sets (args, system, world) that
6127                 have not been pulled into the graph yet. This ensures that the graph
6128                 is consistent such that initially satisfied deep dependencies are not
6129                 broken in the new graph. Initially unsatisfied dependencies are
6130                 irrelevant since we only want to avoid breaking dependencies that are
6131                 initially satisfied.
6132
6133                 Since this method can consume enough time to disturb users, it is
6134                 currently only enabled by the --complete-graph option.
6135                 """
6136                 if "--buildpkgonly" in self.myopts or \
6137                         "recurse" not in self.myparams:
6138                         return 1
6139
6140                 if "complete" not in self.myparams:
6141                         # Skip this to avoid consuming enough time to disturb users.
6142                         return 1
6143
6144                 # Put the depgraph into a mode that causes it to only
6145                 # select packages that have already been added to the
6146                 # graph or those that are installed and have not been
6147                 # scheduled for replacement. Also, toggle the "deep"
6148                 # parameter so that all dependencies are traversed and
6149                 # accounted for.
6150                 self._select_atoms = self._select_atoms_from_graph
6151                 self._select_package = self._select_pkg_from_graph
6152                 already_deep = "deep" in self.myparams
6153                 if not already_deep:
6154                         self.myparams.add("deep")
6155
6156                 for root in self.roots:
6157                         required_set_names = self._required_set_names.copy()
6158                         if root == self.target_root and \
6159                                 (already_deep or "empty" in self.myparams):
6160                                 required_set_names.difference_update(self._sets)
6161                         if not required_set_names and not self._ignored_deps:
6162                                 continue
6163                         root_config = self.roots[root]
6164                         setconfig = root_config.setconfig
6165                         args = []
6166                         # Reuse existing SetArg instances when available.
6167                         for arg in self.digraph.root_nodes():
6168                                 if not isinstance(arg, SetArg):
6169                                         continue
6170                                 if arg.root_config != root_config:
6171                                         continue
6172                                 if arg.name in required_set_names:
6173                                         args.append(arg)
6174                                         required_set_names.remove(arg.name)
6175                         # Create new SetArg instances only when necessary.
6176                         for s in required_set_names:
6177                                 expanded_set = InternalPackageSet(
6178                                         initial_atoms=setconfig.getSetAtoms(s))
6179                                 atom = SETPREFIX + s
6180                                 args.append(SetArg(arg=atom, set=expanded_set,
6181                                         root_config=root_config))
6182                         vardb = root_config.trees["vartree"].dbapi
6183                         for arg in args:
6184                                 for atom in arg.set:
6185                                         self._dep_stack.append(
6186                                                 Dependency(atom=atom, root=root, parent=arg))
6187                         if self._ignored_deps:
6188                                 self._dep_stack.extend(self._ignored_deps)
6189                                 self._ignored_deps = []
6190                         if not self._create_graph(allow_unsatisfied=True):
6191                                 return 0
6192                         # Check the unsatisfied deps to see if any initially satisfied deps
6193                         # will become unsatisfied due to an upgrade. Initially unsatisfied
6194                         # deps are irrelevant since we only want to avoid breaking deps
6195                         # that are initially satisfied.
6196                         while self._unsatisfied_deps:
6197                                 dep = self._unsatisfied_deps.pop()
6198                                 matches = vardb.match_pkgs(dep.atom)
6199                                 if not matches:
6200                                         self._initially_unsatisfied_deps.append(dep)
6201                                         continue
6202                                 # A scheduled installation broke a deep dependency.
6203                                 # Add the installed package to the graph so that it
6204                                 # will be appropriately reported as a slot collision
6205                                 # (possibly solvable via backtracking).
6206                                 pkg = matches[-1] # highest match
6207                                 if not self._add_pkg(pkg, dep):
6208                                         return 0
6209                                 if not self._create_graph(allow_unsatisfied=True):
6210                                         return 0
6211                 return 1
6212
6213         def _pkg(self, cpv, type_name, root_config, installed=False):
6214                 """
6215                 Get a package instance from the cache, or create a new
6216                 one if necessary. Raises PackageNotFound if aux_get
6217                 fails for some reason (package does not exist or is
6218                 corrupt).
6219                 """
6220                 operation = "merge"
6221                 if installed:
6222                         operation = "nomerge"
6223                 pkg = self._pkg_cache.get(
6224                         (type_name, root_config.root, cpv, operation))
6225                 if pkg is None:
6226                         tree_type = self.pkg_tree_map[type_name]
6227                         db = root_config.trees[tree_type].dbapi
6228                         db_keys = list(self._trees_orig[root_config.root][
6229                                 tree_type].dbapi._aux_cache_keys)
6230                         try:
6231                                 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
6232                         except KeyError:
6233                                 raise portage.exception.PackageNotFound(cpv)
6234                         pkg = Package(cpv=cpv, metadata=metadata,
6235                                 root_config=root_config, installed=installed)
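                             # USE for ebuilds is not stored in the tree metadata, so
                             # calculate it from the current configuration.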
6236                         if type_name == "ebuild":
6237                                 settings = self.pkgsettings[root_config.root]
6238                                 settings.setcpv(pkg)
6239                                 pkg.metadata["USE"] = settings["PORTAGE_USE"]
6240                         self._pkg_cache[pkg] = pkg
6241                 return pkg
6242
6243         def validate_blockers(self):
6244                 """Remove any blockers from the digraph that do not match any of the
6245                 packages within the graph.  If necessary, create hard deps to ensure
6246                 correct merge order such that mutually blocking packages are never
6247                 installed simultaneously."""
6248
6249                 if "--buildpkgonly" in self.myopts or \
6250                         "--nodeps" in self.myopts:
6251                         return True
6252
6253                 #if "deep" in self.myparams:
6254                 if True:
6255                         # Pull in blockers from all installed packages that haven't already
6256                         # been pulled into the depgraph. This used to be optional
6257                         # due to the performance penalty that is incurred by all the
6258                         # additional dep_check calls that are required.
6259
6260                         dep_keys = ["DEPEND","RDEPEND","PDEPEND"]
6261                         for myroot in self.trees:
6262                                 vardb = self.trees[myroot]["vartree"].dbapi
6263                                 portdb = self.trees[myroot]["porttree"].dbapi
6264                                 pkgsettings = self.pkgsettings[myroot]
6265                                 final_db = self.mydbapi[myroot]
6266
6267                                 blocker_cache = BlockerCache(myroot, vardb)
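                                     # Entries still present in stale_cache after the loop below
                                     # belong to packages that are no longer installed; they are
                                     # pruned from the cache at the end.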
6268                                 stale_cache = set(blocker_cache)
6269                                 for pkg in vardb:
6270                                         cpv = pkg.cpv
6271                                         stale_cache.discard(cpv)
6272                                         pkg_in_graph = self.digraph.contains(pkg)
6273
6274                                         # Check for masked installed packages. Only warn about
6275                                         # packages that are in the graph in order to avoid warning
6276                                         # about those that will be automatically uninstalled during
6277                                         # the merge process or by --depclean.
6278                                         if pkg in final_db:
6279                                                 if pkg_in_graph and not visible(pkgsettings, pkg):
6280                                                         self._masked_installed.add(pkg)
6281
6282                                         blocker_atoms = None
6283                                         blockers = None
6284                                         if pkg_in_graph:
6285                                                 blockers = []
6286                                                 try:
6287                                                         blockers.extend(
6288                                                                 self._blocker_parents.child_nodes(pkg))
6289                                                 except KeyError:
6290                                                         pass
6291                                                 try:
6292                                                         blockers.extend(
6293                                                                 self._irrelevant_blockers.child_nodes(pkg))
6294                                                 except KeyError:
6295                                                         pass
6296                                         if blockers is not None:
6297                                                 blockers = set(str(blocker.atom) \
6298                                                         for blocker in blockers)
6299
6300                                         # If this node has any blockers, create a "nomerge"
6301                                         # node for it so that they can be enforced.
6302                                         self.spinner.update()
6303                                         blocker_data = blocker_cache.get(cpv)
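                                             # A cached entry is only valid if the installed package's
                                             # COUNTER still matches, since a changed COUNTER means the
                                             # package has been re-merged since the cache was written.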
6304                                         if blocker_data is not None and \
6305                                                 blocker_data.counter != long(pkg.metadata["COUNTER"]):
6306                                                 blocker_data = None
6307
6308                                         # If blocker data from the graph is available, use
6309                                         # it to validate the cache and update the cache if
6310                                         # it seems invalid.
6311                                         if blocker_data is not None and \
6312                                                 blockers is not None:
6313                                                 if not blockers.symmetric_difference(
6314                                                         blocker_data.atoms):
6315                                                         continue
6316                                                 blocker_data = None
6317
6318                                         if blocker_data is None and \
6319                                                 blockers is not None:
6320                                                 # Re-use the blockers from the graph.
6321                                                 blocker_atoms = sorted(blockers)
6322                                                 counter = long(pkg.metadata["COUNTER"])
6323                                                 blocker_data = \
6324                                                         blocker_cache.BlockerData(counter, blocker_atoms)
6325                                                 blocker_cache[pkg.cpv] = blocker_data
6326                                                 continue
6327
6328                                         if blocker_data:
6329                                                 blocker_atoms = blocker_data.atoms
6330                                         else:
6331                                                 # Use aux_get() to trigger FakeVartree global
6332                                                 # updates on *DEPEND when appropriate.
6333                                                 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
6334                                                 # It is crucial to pass in final_db here in order to
6335                                                 # optimize dep_check calls by eliminating atoms via
6336                                                 # dep_wordreduce and dep_eval calls.
6337                                                 try:
6338                                                         portage.dep._dep_check_strict = False
6339                                                         try:
6340                                                                 success, atoms = portage.dep_check(depstr,
6341                                                                         final_db, pkgsettings, myuse=pkg.use.enabled,
6342                                                                         trees=self._graph_trees, myroot=myroot)
6343                                                         except Exception, e:
6344                                                                 if isinstance(e, SystemExit):
6345                                                                         raise
6346                                                                 # This is helpful, for example, if a ValueError
6347                                                                 # is thrown from cpv_expand due to multiple
6348                                                                 # matches (this can happen if an atom lacks a
6349                                                                 # category).
6350                                                                 show_invalid_depstring_notice(
6351                                                                         pkg, depstr, str(e))
6352                                                                 del e
6353                                                                 raise
6354                                                 finally:
6355                                                         portage.dep._dep_check_strict = True
6356                                                 if not success:
6357                                                         replacement_pkg = final_db.match_pkgs(pkg.slot_atom)
6358                                                         if replacement_pkg and \
6359                                                                 replacement_pkg[0].operation == "merge":
6360                                                                 # This package is being replaced anyway, so
6361                                                                 # ignore invalid dependencies so as not to
6362                                                                 # annoy the user too much (otherwise they'd be
6363                                                                 # forced to manually unmerge it first).
6364                                                                 continue
6365                                                         show_invalid_depstring_notice(pkg, depstr, atoms)
6366                                                         return False
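                                                     # Keep only the blocker atoms ("!" prefixed) returned
                                                     # by dep_check; ordinary dependency atoms are not
                                                     # needed here.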
6367                                                 blocker_atoms = [myatom for myatom in atoms \
6368                                                         if myatom.startswith("!")]
6369                                                 blocker_atoms.sort()
6370                                                 counter = long(pkg.metadata["COUNTER"])
6371                                                 blocker_cache[pkg.cpv] = \
6372                                                         blocker_cache.BlockerData(counter, blocker_atoms)
6373                                         if blocker_atoms:
6374                                                 try:
6375                                                         for atom in blocker_atoms:
6376                                                                 blocker = Blocker(atom=portage.dep.Atom(atom),
6377                                                                         eapi=pkg.metadata["EAPI"], root=myroot)
6378                                                                 self._blocker_parents.add(blocker, pkg)
6379                                                 except portage.exception.InvalidAtom, e:
6380                                                         depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
6381                                                         show_invalid_depstring_notice(
6382                                                                 pkg, depstr, "Invalid Atom: %s" % (e,))
6383                                                         return False
6384                                 for cpv in stale_cache:
6385                                         del blocker_cache[cpv]
6386                                 blocker_cache.flush()
6387                                 del blocker_cache
6388
6389                 # Discard any "uninstall" tasks scheduled by previous calls
6390                 # to this method, since those tasks may not make sense given
6391                 # the current graph state.
6392                 previous_uninstall_tasks = self._blocker_uninstalls.leaf_nodes()
6393                 if previous_uninstall_tasks:
6394                         self._blocker_uninstalls = digraph()
6395                         self.digraph.difference_update(previous_uninstall_tasks)
6396
6397                 for blocker in self._blocker_parents.leaf_nodes():
6398                         self.spinner.update()
6399                         root_config = self.roots[blocker.root]
6400                         virtuals = root_config.settings.getvirtuals()
6401                         myroot = blocker.root
6402                         initial_db = self.trees[myroot]["vartree"].dbapi
6403                         final_db = self.mydbapi[myroot]
6404
6405                         provider_virtual = False
6406                         if blocker.cp in virtuals and \
6407                                 not self._have_new_virt(blocker.root, blocker.cp):
6408                                 provider_virtual = True
6409
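                             # For a blocker on an old-style virtual (one with no new-style
                             # virtual package involved), expand the atom to one atom per
                             # PROVIDE-based provider so that the packages actually providing
                             # the virtual can be matched below.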
6410                         if provider_virtual:
6411                                 atoms = []
6412                                 for provider_entry in virtuals[blocker.cp]:
6413                                         provider_cp = \
6414                                                 portage.dep_getkey(provider_entry)
6415                                         atoms.append(blocker.atom.replace(
6416                                                 blocker.cp, provider_cp))
6417                         else:
6418                                 atoms = [blocker.atom]
6419
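                             # Match the (possibly expanded) blocker atoms against both the
                             # currently installed packages and the final state of the graph.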
6420                         blocked_initial = []
6421                         for atom in atoms:
6422                                 blocked_initial.extend(initial_db.match_pkgs(atom))
6423
6424                         blocked_final = []
6425                         for atom in atoms:
6426                                 blocked_final.extend(final_db.match_pkgs(atom))
6427
6428                         if not blocked_initial and not blocked_final:
6429                                 parent_pkgs = self._blocker_parents.parent_nodes(blocker)
6430                                 self._blocker_parents.remove(blocker)
6431                                 # Discard any parents that don't have any more blockers.
6432                                 for pkg in parent_pkgs:
6433                                         self._irrelevant_blockers.add(blocker, pkg)
6434                                         if not self._blocker_parents.child_nodes(pkg):
6435                                                 self._blocker_parents.remove(pkg)
6436                                 continue
6437                         for parent in self._blocker_parents.parent_nodes(blocker):
6438                                 unresolved_blocks = False
6439                                 depends_on_order = set()
6440                                 for pkg in blocked_initial:
6441                                         if pkg.slot_atom == parent.slot_atom:
6442                                                 # TODO: Support blocks within slots in cases where it
6443                                                 # might make sense.  For example, a new version might
6444                                                 # require that the old version be uninstalled at build
6445                                                 # time.
6446                                                 continue
6447                                         if parent.installed:
6448                                                 # Two currently installed packages conflict with
6449                                                 # each other. Ignore this case since the damage
6450                                                 # is already done and this would be likely to
6451                                                 # confuse users if displayed like a normal blocker.
6452                                                 continue
6453
6454                                         self._blocked_pkgs.add(pkg, blocker)
6455
6456                                         if parent.operation == "merge":
6457                                                 # Maybe the blocked package can be replaced or simply
6458                                                 # unmerged to resolve this block.
6459                                                 depends_on_order.add((pkg, parent))
6460                                                 continue
6461                                         # None of the above blocker resolution techniques apply,
6462                                         # so apparently this one is unresolvable.
6463                                         unresolved_blocks = True
6464                                 for pkg in blocked_final:
6465                                         if pkg.slot_atom == parent.slot_atom:
6466                                                 # TODO: Support blocks within slots.
6467                                                 continue
6468                                         if parent.operation == "nomerge" and \
6469                                                 pkg.operation == "nomerge":
6470                                                 # This blocker will be handled the next time that a
6471                                                 # merge of either package is triggered.
6472                                                 continue
6473
6474                                         self._blocked_pkgs.add(pkg, blocker)
6475
6476                                         # Maybe the blocking package can be
6477                                         # unmerged to resolve this block.
6478                                         if parent.operation == "merge" and pkg.installed:
6479                                                 depends_on_order.add((pkg, parent))
6480                                                 continue
6481                                         elif parent.operation == "nomerge":
6482                                                 depends_on_order.add((parent, pkg))
6483                                                 continue
6484                                         # None of the above blocker resolution techniques apply,
6485                                         # so apparently this one is unresolvable.
6486                                         unresolved_blocks = True
6487
6488                                 # Make sure we don't unmerge any packages that have been pulled
6489                                 # into the graph.
6490                                 if not unresolved_blocks and depends_on_order:
6491                                         for inst_pkg, inst_task in depends_on_order:
6492                                                 if self.digraph.contains(inst_pkg) and \
6493                                                         self.digraph.parent_nodes(inst_pkg):
6494                                                         unresolved_blocks = True
6495                                                         break
6496
6497                                 if not unresolved_blocks and depends_on_order:
6498                                         for inst_pkg, inst_task in depends_on_order:
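                                                     # Clone the installed package as an explicit
                                                     # "uninstall" task so that merge order can be
                                                     # enforced relative to the packages involved in
                                                     # the block.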
6499                                                 uninst_task = Package(built=inst_pkg.built,
6500                                                         cpv=inst_pkg.cpv, installed=inst_pkg.installed,
6501                                                         metadata=inst_pkg.metadata,
6502                                                         operation="uninstall",
6503                                                         root_config=inst_pkg.root_config,
6504                                                         type_name=inst_pkg.type_name)
6505                                                 self._pkg_cache[uninst_task] = uninst_task
6506                                                 # Enforce correct merge order with a hard dep.
6507                                                 self.digraph.addnode(uninst_task, inst_task,
6508                                                         priority=BlockerDepPriority.instance)
6509                                                 # Count references to this blocker so that it can be
6510                                                 # invalidated after nodes referencing it have been
6511                                                 # merged.
6512                                                 self._blocker_uninstalls.addnode(uninst_task, blocker)
6513                                 if not unresolved_blocks and not depends_on_order:
6514                                         self._irrelevant_blockers.add(blocker, parent)
6515                                         self._blocker_parents.remove_edge(blocker, parent)
6516                                         if not self._blocker_parents.parent_nodes(blocker):
6517                                                 self._blocker_parents.remove(blocker)
6518                                         if not self._blocker_parents.child_nodes(parent):
6519                                                 self._blocker_parents.remove(parent)
6520                                 if unresolved_blocks:
6521                                         self._unsolvable_blockers.add(blocker, parent)
6522
6523                 return True
6524
6525         def _accept_blocker_conflicts(self):
6526                 acceptable = False
6527                 for x in ("--buildpkgonly", "--fetchonly",
6528                         "--fetch-all-uri", "--nodeps"):
6529                         if x in self.myopts:
6530                                 acceptable = True
6531                                 break
6532                 return acceptable
6533
6534         def _merge_order_bias(self, mygraph):
6535                 """Order nodes from highest to lowest overall reference count for
6536                 optimal leaf node selection."""
6537                 node_info = {}
6538                 for node in mygraph.order:
6539                         node_info[node] = len(mygraph.parent_nodes(node))
6540                 def cmp_merge_preference(node1, node2):
6541                         return node_info[node2] - node_info[node1]
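                     # Rough example with hypothetical counts: if node A has three
                     # parents and node B has one, cmp_merge_preference(A, B) returns
                     # 1 - 3 == -2, so A sorts ahead of B and the most referenced
                     # nodes end up at the front of mygraph.order.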
6542                 mygraph.order.sort(cmp_merge_preference)
6543
6544         def altlist(self, reversed=False):
6545
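                     # Retry until serialization succeeds; _serialize_tasks() may
                     # raise _serialize_tasks_retry after expanding myparams (for
                     # example, adding "complete"), in which case conflicts are
                     # resolved again against the updated graph.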
6546                 while self._serialized_tasks_cache is None:
6547                         self._resolve_conflicts()
6548                         try:
6549                                 self._serialized_tasks_cache, self._scheduler_graph = \
6550                                         self._serialize_tasks()
6551                         except self._serialize_tasks_retry:
6552                                 pass
6553
6554                 retlist = self._serialized_tasks_cache[:]
6555                 if reversed:
6556                         retlist.reverse()
6557                 return retlist
6558
6559         def schedulerGraph(self):
6560                 """
6561                 The scheduler graph is identical to the normal one except that
6562                 uninstall edges are reversed in specific cases that require
6563                 conflicting packages to be temporarily installed simultaneously.
6564                 This is intended for use by the Scheduler in its parallelization
6565                 logic. It ensures that temporary simultaneous installation of
6566                 conflicting packages is avoided when appropriate (especially for
6567                 !!atom blockers), but allowed in specific cases that require it.
6568
6569                 Note that this method calls break_refs() which alters the state of
6570                 internal Package instances such that this depgraph instance should
6571                 not be used to perform any more calculations.
6572                 """
6573                 if self._scheduler_graph is None:
6574                         self.altlist()
6575                 self.break_refs(self._scheduler_graph.order)
6576                 return self._scheduler_graph
6577
6578         def break_refs(self, nodes):
6579                 """
6580                 Take a mergelist like that returned from self.altlist() and
6581                 break any references that lead back to the depgraph. This is
6582                 useful if you want to hold references to packages without
6583                 also holding the depgraph on the heap.
6584                 """
6585                 for node in nodes:
6586                         if hasattr(node, "root_config"):
6587                                 # The FakeVartree references the _package_cache which
6588                                 # references the depgraph. So that Package instances don't
6589                                 # hold the depgraph and FakeVartree on the heap, replace
6590                                 # the RootConfig that references the FakeVartree with the
6591                                 # original RootConfig instance which references the actual
6592                                 # vartree.
6593                                 node.root_config = \
6594                                         self._trees_orig[node.root_config.root]["root_config"]
6595
6596         def _resolve_conflicts(self):
6597                 if not self._complete_graph():
6598                         raise self._unknown_internal_error()
6599
6600                 if not self.validate_blockers():
6601                         raise self._unknown_internal_error()
6602
6603                 if self._slot_collision_info:
6604                         self._process_slot_conflicts()
6605
6606         def _serialize_tasks(self):
6607
6608                 if "--debug" in self.myopts:
6609                         writemsg("\ndigraph:\n\n", noiselevel=-1)
6610                         self.digraph.debug_print()
6611                         writemsg("\n", noiselevel=-1)
6612
6613                 scheduler_graph = self.digraph.copy()
6614                 mygraph = self.digraph.copy()
6615                 # Prune "nomerge" root nodes if nothing depends on them, since
6616                 # otherwise they slow down merge order calculation. Don't remove
6617                 # non-root nodes since they help optimize merge order in some cases
6618                 # such as revdep-rebuild.
6619                 removed_nodes = set()
6620                 while True:
6621                         for node in mygraph.root_nodes():
6622                                 if not isinstance(node, Package) or \
6623                                         node.installed or node.onlydeps:
6624                                         removed_nodes.add(node)
6625                         if removed_nodes:
6626                                 self.spinner.update()
6627                                 mygraph.difference_update(removed_nodes)
6628                         if not removed_nodes:
6629                                 break
6630                         removed_nodes.clear()
6631                 self._merge_order_bias(mygraph)
6632                 def cmp_circular_bias(n1, n2):
6633                         """
6634                         RDEPEND is stronger than PDEPEND and this function
6635                         measures such a strength bias within a circular
6636                         dependency relationship.
6637                         """
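                             # For instance, if n1 still has n2 as a child after ignoring
                             # priorities up to MEDIUM_SOFT but not vice versa, this returns
                             # 1 so that n2 sorts ahead of n1 and is merged first within
                             # the circular group.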
6638                         n1_n2_medium = n2 in mygraph.child_nodes(n1,
6639                                 ignore_priority=DepPriority.MEDIUM_SOFT)
6640                         n2_n1_medium = n1 in mygraph.child_nodes(n2,
6641                                 ignore_priority=DepPriority.MEDIUM_SOFT)
6642                         if n1_n2_medium == n2_n1_medium:
6643                                 return 0
6644                         elif n1_n2_medium:
6645                                 return 1
6646                         return -1
6647                 myblocker_uninstalls = self._blocker_uninstalls.copy()
6648                 retlist = []
6649                 # Contains uninstall tasks that have been scheduled to
6650                 # occur after overlapping blockers have been installed.
6651                 scheduled_uninstalls = set()
6652                 # Contains any Uninstall tasks that have been ignored
6653                 # in order to avoid the circular deps code path. These
6654                 # correspond to blocker conflicts that could not be
6655                 # resolved.
6656                 ignored_uninstall_tasks = set()
6657                 have_uninstall_task = False
6658                 complete = "complete" in self.myparams
6659                 asap_nodes = []
6660
6661                 def get_nodes(**kwargs):
6662                         """
6663                         Returns leaf nodes excluding Uninstall instances
6664                         since those should be executed as late as possible.
6665                         """
6666                         return [node for node in mygraph.leaf_nodes(**kwargs) \
6667                                 if isinstance(node, Package) and \
6668                                         (node.operation != "uninstall" or \
6669                                         node in scheduled_uninstalls)]
6670
6671                 # sys-apps/portage needs special treatment if ROOT="/"
6672                 running_root = self._running_root.root
6673                 from portage.const import PORTAGE_PACKAGE_ATOM
6674                 runtime_deps = InternalPackageSet(
6675                         initial_atoms=[PORTAGE_PACKAGE_ATOM])
6676                 running_portage = self.trees[running_root]["vartree"].dbapi.match_pkgs(
6677                         PORTAGE_PACKAGE_ATOM)
6678                 replacement_portage = self.mydbapi[running_root].match_pkgs(
6679                         PORTAGE_PACKAGE_ATOM)
6680
6681                 if running_portage:
6682                         running_portage = running_portage[0]
6683                 else:
6684                         running_portage = None
6685
6686                 if replacement_portage:
6687                         replacement_portage = replacement_portage[0]
6688                 else:
6689                         replacement_portage = None
6690
6691                 if replacement_portage == running_portage:
6692                         replacement_portage = None
6693
6694                 if replacement_portage is not None:
6695                         # update from running_portage to replacement_portage asap
6696                         asap_nodes.append(replacement_portage)
6697
6698                 if running_portage is not None:
6699                         try:
6700                                 portage_rdepend = self._select_atoms_highest_available(
6701                                         running_root, running_portage.metadata["RDEPEND"],
6702                                         myuse=running_portage.use.enabled,
6703                                         parent=running_portage, strict=False)
6704                         except portage.exception.InvalidDependString, e:
6705                                 portage.writemsg("!!! Invalid RDEPEND in " + \
6706                                         "'%svar/db/pkg/%s/RDEPEND': %s\n" % \
6707                                         (running_root, running_portage.cpv, e), noiselevel=-1)
6708                                 del e
6709                                 portage_rdepend = []
6710                         runtime_deps.update(atom for atom in portage_rdepend \
6711                                 if not atom.startswith("!"))
6712
6713                 ignore_priority_soft_range = [None]
6714                 ignore_priority_soft_range.extend(
6715                         xrange(DepPriority.MIN, DepPriority.MEDIUM_SOFT + 1))
6716                 tree_mode = "--tree" in self.myopts
6717                 # Tracks whether or not the current iteration should prefer asap_nodes
6718                 # if available.  This is set to False when the previous iteration
6719                 # failed to select any nodes.  It is reset whenever nodes are
6720                 # successfully selected.
6721                 prefer_asap = True
6722
6723                 # By default, try to avoid selecting root nodes whenever possible. This
6724                 # helps ensure that the maximum possible number of soft dependencies
6725                 # have been removed from the graph before their parent nodes have been
6726                 # selected. This is especially important when those dependencies are
6727                 # going to be rebuilt by revdep-rebuild or `emerge -e system` after the
6728                 # CHOST has been changed (like when building a stage3 from a stage2).
6729                 accept_root_node = False
6730
6731                 # State of prefer_asap and accept_root_node flags for successive
6732                 # iterations that loosen the criteria for node selection.
6733                 #
6734                 # iteration   prefer_asap   accept_root_node
6735                 # 1           True          False
6736                 # 2           False         False
6737                 # 3           False         True
6738                 #
6739                 # If no nodes are selected on the 3rd iteration, it is due to
6740                 # unresolved blockers or circular dependencies.
6741
6742                 while not mygraph.empty():
6743                         self.spinner.update()
6744                         selected_nodes = None
6745                         ignore_priority = None
6746                         if prefer_asap and asap_nodes:
6747                                 """ASAP nodes are merged before their soft deps."""
6748                                 asap_nodes = [node for node in asap_nodes \
6749                                         if mygraph.contains(node)]
6750                                 for node in asap_nodes:
6751                                         if not mygraph.child_nodes(node,
6752                                                 ignore_priority=DepPriority.SOFT):
6753                                                 selected_nodes = [node]
6754                                                 asap_nodes.remove(node)
6755                                                 break
6756                         if not selected_nodes and \
6757                                 not (prefer_asap and asap_nodes):
6758                                 for ignore_priority in ignore_priority_soft_range:
6759                                         nodes = get_nodes(ignore_priority=ignore_priority)
6760                                         if nodes:
6761                                                 break
6762                                 if nodes:
6763                                         if ignore_priority is None and not tree_mode:
6764                                                 # Greedily pop all of these nodes since no relationship
6765                                                 # has been ignored.  This optimization destroys --tree
6766                                                 # output, so it's disabled in --tree mode. If there
6767                                                 # is a mix of merge and uninstall nodes, save the
6768                                                 # uninstall nodes for later since sometimes a merge
6769                                                 # node will render an uninstall node unnecessary, and
6770                                                 # we want to avoid doing a separate uninstall task in
6771                                                 # that case.
6772                                                 merge_nodes = [node for node in nodes \
6773                                                         if node.operation == "merge"]
6774                                                 if merge_nodes:
6775                                                         selected_nodes = merge_nodes
6776                                                 else:
6777                                                         selected_nodes = nodes
6778                                         else:
6779                                                 # For optimal merge order:
6780                                                 #  * Only pop one node.
6781                                                 #  * Removing a root node (node without a parent)
6782                                                 #    will not produce a leaf node, so avoid it.
6783                                                 for node in nodes:
6784                                                         if mygraph.parent_nodes(node):
6785                                                                 # found a non-root node
6786                                                                 selected_nodes = [node]
6787                                                                 break
6788                                                 if not selected_nodes and \
6789                                                         (accept_root_node or ignore_priority is None):
6790                                                         # settle for a root node
6791                                                         selected_nodes = [nodes[0]]
6792
6793                         if not selected_nodes:
6794                                 nodes = get_nodes(ignore_priority=DepPriority.MEDIUM)
6795                                 if nodes:
6796                                         """Recursively gather a group of nodes that RDEPEND on
6797                                         each other.  This ensures that they are merged as a group
6798                                         and get their RDEPENDs satisfied as soon as possible."""
6799                                         def gather_deps(ignore_priority,
6800                                                 mergeable_nodes, selected_nodes, node):
6801                                                 if node in selected_nodes:
6802                                                         return True
6803                                                 if node not in mergeable_nodes:
6804                                                         return False
6805                                                 if node == replacement_portage and \
6806                                                         mygraph.child_nodes(node,
6807                                                         ignore_priority=DepPriority.MEDIUM_SOFT):
6808                                                         # Make sure that portage always has all of its
6809                                                         # RDEPENDs installed first.
6810                                                         return False
6811                                                 selected_nodes.add(node)
6812                                                 for child in mygraph.child_nodes(node,
6813                                                         ignore_priority=ignore_priority):
6814                                                         if not gather_deps(ignore_priority,
6815                                                                 mergeable_nodes, selected_nodes, child):
6816                                                                 return False
6817                                                 return True
6818                                         mergeable_nodes = set(nodes)
6819                                         if prefer_asap and asap_nodes:
6820                                                 nodes = asap_nodes
6821                                         for ignore_priority in xrange(DepPriority.SOFT,
6822                                                 DepPriority.MEDIUM_SOFT + 1):
6823                                                 for node in nodes:
6824                                                         if nodes is not asap_nodes and \
6825                                                                 not accept_root_node and \
6826                                                                 not mygraph.parent_nodes(node):
6827                                                                 continue
6828                                                         selected_nodes = set()
6829                                                         if gather_deps(ignore_priority,
6830                                                                 mergeable_nodes, selected_nodes, node):
6831                                                                 break
6832                                                         else:
6833                                                                 selected_nodes = None
6834                                                 if selected_nodes:
6835                                                         break
6836
6837                                         # If any nodes have been selected here, it's always
6838                                         # possible that anything up to a MEDIUM_SOFT priority
6839                                         # relationship has been ignored. This state is recorded
6840                                         # in ignore_priority so that relevant nodes will be
6841                                         # added to asap_nodes when appropriate.
6842                                         if selected_nodes:
6843                                                 ignore_priority = DepPriority.MEDIUM_SOFT
6844
6845                                         if prefer_asap and asap_nodes and not selected_nodes:
6846                                                 # We failed to find any asap nodes to merge, so ignore
6847                                                 # them for the next iteration.
6848                                                 prefer_asap = False
6849                                                 continue
6850
6851                                         if not selected_nodes and not accept_root_node:
6852                                                 # Maybe there are only root nodes left, so accept them
6853                                                 # for the next iteration.
6854                                                 accept_root_node = True
6855                                                 continue
6856
6857                         if selected_nodes and ignore_priority > DepPriority.SOFT:
6858                                 # Try to merge ignored medium deps as soon as possible.
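                                     # 'soft' collects children connected only through SOFT or
                                     # weaker edges, while 'medium_soft' keeps children whose
                                     # strongest edge is above SOFT but at most MEDIUM_SOFT
                                     # (typically PDEPEND-style deps); the latter are queued
                                     # in asap_nodes.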
6859                                 for node in selected_nodes:
6860                                         children = set(mygraph.child_nodes(node))
6861                                         soft = children.difference(
6862                                                 mygraph.child_nodes(node,
6863                                                 ignore_priority=DepPriority.SOFT))
6864                                         medium_soft = children.difference(
6865                                                 mygraph.child_nodes(node,
6866                                                 ignore_priority=DepPriority.MEDIUM_SOFT))
6867                                         medium_soft.difference_update(soft)
6868                                         for child in medium_soft:
6869                                                 if child in selected_nodes:
6870                                                         continue
6871                                                 if child in asap_nodes:
6872                                                         continue
6873                                                 asap_nodes.append(child)
6874
6875                         if selected_nodes and len(selected_nodes) > 1:
6876                                 if not isinstance(selected_nodes, list):
6877                                         selected_nodes = list(selected_nodes)
6878                                 selected_nodes.sort(cmp_circular_bias)
6879
6880                         if not selected_nodes and not myblocker_uninstalls.is_empty():
6881                                 # An Uninstall task needs to be executed in order to
6882                                 # avoid conflict if possible.
6883                                 min_parent_deps = None
6884                                 uninst_task = None
6885                                 for task in myblocker_uninstalls.leaf_nodes():
6886                                         # Do some sanity checks so that system or world packages
6887                                         # don't get uninstalled inappropriately here (only really
6888                                         # necessary when --complete-graph has not been enabled).
6889
6890                                         if task in ignored_uninstall_tasks:
6891                                                 continue
6892
6893                                         if task in scheduled_uninstalls:
6894                                                 # It's been scheduled but it hasn't
6895                                                 # been executed yet due to dependence
6896                                                 # on installation of blocking packages.
6897                                                 continue
6898
6899                                         root_config = self.roots[task.root]
6900                                         inst_pkg = self._pkg_cache[
6901                                                 ("installed", task.root, task.cpv, "nomerge")]
6902
6903                                         if self.digraph.contains(inst_pkg):
6904                                                 continue
6905
6906                                         forbid_overlap = False
6907                                         heuristic_overlap = False
6908                                         for blocker in myblocker_uninstalls.parent_nodes(task):
6909                                                 if blocker.eapi in ("0", "1"):
6910                                                         heuristic_overlap = True
6911                                                 elif blocker.atom.blocker.overlap.forbid:
6912                                                         forbid_overlap = True
6913                                                         break
6914                                         if forbid_overlap and running_root == task.root:
6915                                                 continue
6916
6917                                         if heuristic_overlap and running_root == task.root:
6918                                                 # Never uninstall sys-apps/portage or its essential
6919                                                 # dependencies, except through replacement.
6920                                                 try:
6921                                                         runtime_dep_atoms = \
6922                                                                 list(runtime_deps.iterAtomsForPackage(task))
6923                                                 except portage.exception.InvalidDependString, e:
6924                                                         portage.writemsg("!!! Invalid PROVIDE in " + \
6925                                                                 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
6926                                                                 (task.root, task.cpv, e), noiselevel=-1)
6927                                                         del e
6928                                                         continue
6929
6930                                                 # Don't uninstall a runtime dep if it appears
6931                                                 # to be the only suitable one installed.
6932                                                 skip = False
6933                                                 vardb = root_config.trees["vartree"].dbapi
6934                                                 for atom in runtime_dep_atoms:
6935                                                         other_version = None
6936                                                         for pkg in vardb.match_pkgs(atom):
6937                                                                 if pkg.cpv == task.cpv and \
6938                                                                         pkg.metadata["COUNTER"] == \
6939                                                                         task.metadata["COUNTER"]:
6940                                                                         continue
6941                                                                 other_version = pkg
6942                                                                 break
6943                                                         if other_version is None:
6944                                                                 skip = True
6945                                                                 break
6946                                                 if skip:
6947                                                         continue
6948
6949                                                 # For packages in the system set, don't take
6950                                                 # any chances. If the conflict can't be resolved
6951                                                 # by a normal replacement operation then abort.
6952                                                 skip = False
6953                                                 try:
6954                                                         for atom in root_config.sets[
6955                                                                 "system"].iterAtomsForPackage(task):
6956                                                                 skip = True
6957                                                                 break
6958                                                 except portage.exception.InvalidDependString, e:
6959                                                         portage.writemsg("!!! Invalid PROVIDE in " + \
6960                                                                 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
6961                                                                 (task.root, task.cpv, e), noiselevel=-1)
6962                                                         del e
6963                                                         skip = True
6964                                                 if skip:
6965                                                         continue
6966
6967                                         # Note that the world check isn't always
6968                                         # necessary since self._complete_graph() will
6969                                         # add all packages from the system and world sets to the
6970                                         # graph. This just allows unresolved conflicts to be
6971                                         # detected as early as possible, which makes it possible
6972                                         # to avoid calling self._complete_graph() when it is
6973                                         # unnecessary due to blockers triggering an abort.
6974                                         if not complete:
6975                                                 # For packages in the world set, go ahead and uninstall
6976                                                 # when necessary, as long as the atom will be satisfied
6977                                                 # in the final state.
6978                                                 graph_db = self.mydbapi[task.root]
6979                                                 skip = False
6980                                                 try:
6981                                                         for atom in root_config.sets[
6982                                                                 "world"].iterAtomsForPackage(task):
6983                                                                 satisfied = False
6984                                                                 for pkg in graph_db.match_pkgs(atom):
6985                                                                         if pkg == inst_pkg:
6986                                                                                 continue
6987                                                                         satisfied = True
6988                                                                         break
6989                                                                 if not satisfied:
6990                                                                         skip = True
6991                                                                         self._blocked_world_pkgs[inst_pkg] = atom
6992                                                                         break
6993                                                 except portage.exception.InvalidDependString, e:
6994                                                         portage.writemsg("!!! Invalid PROVIDE in " + \
6995                                                                 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
6996                                                                 (task.root, task.cpv, e), noiselevel=-1)
6997                                                         del e
6998                                                         skip = True
6999                                                 if skip:
7000                                                         continue
7001
7002                                         # Check the deps of parent nodes to ensure that
7003                                         # the chosen task produces a leaf node. Maybe
7004                                         # this can be optimized some more to make the
7005                                         # best possible choice, but the current algorithm
7006                                         # is simple and should be near optimal for most
7007                                         # common cases.
7008                                         parent_deps = set()
7009                                         for parent in mygraph.parent_nodes(task):
7010                                                 parent_deps.update(mygraph.child_nodes(parent,
7011                                                         ignore_priority=DepPriority.MEDIUM_SOFT))
7012                                         parent_deps.remove(task)
7013                                         if min_parent_deps is None or \
7014                                                 len(parent_deps) < min_parent_deps:
7015                                                 min_parent_deps = len(parent_deps)
7016                                                 uninst_task = task
7017
7018                                 if uninst_task is not None:
7019                                         # The uninstall is performed only after blocking
7020                                         # packages have been merged on top of it. File
7021                                         # collisions between blocking packages are detected
7022                                         # and removed from the list of files to be uninstalled.
7023                                         scheduled_uninstalls.add(uninst_task)
7024                                         parent_nodes = mygraph.parent_nodes(uninst_task)
7025
7026                                         # Reverse the parent -> uninstall edges since we want
7027                                         # to do the uninstall after blocking packages have
7028                                         # been merged on top of it.
7029                                         mygraph.remove(uninst_task)
7030                                         for blocked_pkg in parent_nodes:
7031                                                 mygraph.add(blocked_pkg, uninst_task,
7032                                                         priority=BlockerDepPriority.instance)
7033                                                 scheduler_graph.remove_edge(uninst_task, blocked_pkg)
7034                                                 scheduler_graph.add(blocked_pkg, uninst_task,
7035                                                         priority=BlockerDepPriority.instance)
7036
7037                                 else:
7038                                         # None of the Uninstall tasks are acceptable, so
7039                                         # the corresponding blockers are unresolvable.
7040                                         # We need to drop an Uninstall task here in order
7041                                         # to avoid the circular deps code path, but the
7042                                         # blocker will still be counted as an unresolved
7043                                         # conflict.
7044                                         for node in myblocker_uninstalls.leaf_nodes():
7045                                                 try:
7046                                                         mygraph.remove(node)
7047                                                 except KeyError:
7048                                                         pass
7049                                                 else:
7050                                                         uninst_task = node
7051                                                         ignored_uninstall_tasks.add(node)
7052                                                         break
7053
7054                                 if uninst_task is not None:
7055                                         # After dropping an Uninstall task, reset
7056                                         # the state variables for leaf node selection and
7057                                         # continue trying to select leaf nodes.
7058                                         prefer_asap = True
7059                                         accept_root_node = False
7060                                         continue
7061
7062                         if not selected_nodes:
7063                                 self._circular_deps_for_display = mygraph
7064                                 raise self._unknown_internal_error()
7065
7066                         # At this point, we've succeeded in selecting one or more nodes, so
7067                         # it's now safe to reset the prefer_asap and accept_root_node flags
7068                         # to their default states.
7069                         prefer_asap = True
7070                         accept_root_node = False
7071
7072                         mygraph.difference_update(selected_nodes)
7073
7074                         for node in selected_nodes:
7075                                 if isinstance(node, Package) and \
7076                                         node.operation == "nomerge":
7077                                         continue
7078
7079                                 # Handle interactions between blockers
7080                                 # and uninstallation tasks.
7081                                 solved_blockers = set()
7082                                 uninst_task = None
7083                                 if isinstance(node, Package) and \
7084                                         "uninstall" == node.operation:
7085                                         have_uninstall_task = True
7086                                         uninst_task = node
7087                                 else:
7088                                         vardb = self.trees[node.root]["vartree"].dbapi
7089                                         previous_cpv = vardb.match(node.slot_atom)
7090                                         if previous_cpv:
7091                                                 # The package will be replaced by this one, so remove
7092                                                 # the corresponding Uninstall task if necessary.
7093                                                 previous_cpv = previous_cpv[0]
7094                                                 uninst_task = \
7095                                                         ("installed", node.root, previous_cpv, "uninstall")
7096                                                 try:
7097                                                         mygraph.remove(uninst_task)
7098                                                 except KeyError:
7099                                                         pass
7100
7101                                 if uninst_task is not None and \
7102                                         uninst_task not in ignored_uninstall_tasks and \
7103                                         myblocker_uninstalls.contains(uninst_task):
7104                                         blocker_nodes = myblocker_uninstalls.parent_nodes(uninst_task)
7105                                         myblocker_uninstalls.remove(uninst_task)
7106                                         # Discard any blockers that this Uninstall solves.
7107                                         for blocker in blocker_nodes:
7108                                                 if not myblocker_uninstalls.child_nodes(blocker):
7109                                                         myblocker_uninstalls.remove(blocker)
7110                                                         solved_blockers.add(blocker)
7111
7112                                 retlist.append(node)
7113
7114                                 if (isinstance(node, Package) and \
7115                                         "uninstall" == node.operation) or \
7116                                         (uninst_task is not None and \
7117                                         uninst_task in scheduled_uninstalls):
7118                                         # Include satisfied blockers in the merge list
7119                                         # since the user might be interested and also
7120                                         # it serves as an indicator that blocking packages
7121                                         # will be temporarily installed simultaneously.
7122                                         for blocker in solved_blockers:
7123                                                 retlist.append(Blocker(atom=blocker.atom,
7124                                                         root=blocker.root, eapi=blocker.eapi,
7125                                                         satisfied=True))
7126
7127                 unsolvable_blockers = set(self._unsolvable_blockers.leaf_nodes())
7128                 for node in myblocker_uninstalls.root_nodes():
7129                         unsolvable_blockers.add(node)
7130
7131                 for blocker in unsolvable_blockers:
7132                         retlist.append(blocker)
7133
7134                 # If any Uninstall tasks need to be executed in order
7135                 # to avoid a conflict, complete the graph with any
7136                 # dependencies that may have been initially
7137                 # neglected (to ensure that unsafe Uninstall tasks
7138                 # are properly identified and blocked from execution).
7139                 if have_uninstall_task and \
7140                         not complete and \
7141                         not unsolvable_blockers:
7142                         self.myparams.add("complete")
7143                         raise self._serialize_tasks_retry("")
7144
7145                 if unsolvable_blockers and \
7146                         not self._accept_blocker_conflicts():
7147                         self._unsatisfied_blockers_for_display = unsolvable_blockers
7148                         self._serialized_tasks_cache = retlist[:]
7149                         self._scheduler_graph = scheduler_graph
7150                         raise self._unknown_internal_error()
7151
7152                 if self._slot_collision_info and \
7153                         not self._accept_blocker_conflicts():
7154                         self._serialized_tasks_cache = retlist[:]
7155                         self._scheduler_graph = scheduler_graph
7156                         raise self._unknown_internal_error()
7157
7158                 return retlist, scheduler_graph
7159
7160         def _show_circular_deps(self, mygraph):
7161                 # No leaf nodes are available, so we have a circular
7162                 # dependency panic situation.  Reduce the noise level to a
7163                 # minimum via repeated elimination of root nodes since they
7164                 # have no parents and thus cannot be part of a cycle.
7165                 while True:
7166                         root_nodes = mygraph.root_nodes(
7167                                 ignore_priority=DepPriority.MEDIUM_SOFT)
7168                         if not root_nodes:
7169                                 break
7170                         mygraph.difference_update(root_nodes)
7171                 # Display the USE flags that are enabled on nodes that are part
7172                 # of dependency cycles in case that helps the user decide to
7173                 # disable some of them.
7174                 display_order = []
7175                 tempgraph = mygraph.copy()
7176                 while not tempgraph.empty():
7177                         nodes = tempgraph.leaf_nodes()
7178                         if not nodes:
7179                                 node = tempgraph.order[0]
7180                         else:
7181                                 node = nodes[0]
7182                         display_order.append(node)
7183                         tempgraph.remove(node)
7184                 display_order.reverse()
7185                 self.myopts.pop("--quiet", None)
7186                 self.myopts.pop("--verbose", None)
7187                 self.myopts["--tree"] = True
7188                 portage.writemsg("\n\n", noiselevel=-1)
7189                 self.display(display_order)
7190                 prefix = colorize("BAD", " * ")
7191                 portage.writemsg("\n", noiselevel=-1)
7192                 portage.writemsg(prefix + "Error: circular dependencies:\n",
7193                         noiselevel=-1)
7194                 portage.writemsg("\n", noiselevel=-1)
7195                 mygraph.debug_print()
7196                 portage.writemsg("\n", noiselevel=-1)
7197                 portage.writemsg(prefix + "Note that circular dependencies " + \
7198                         "can often be avoided by temporarily\n", noiselevel=-1)
7199                 portage.writemsg(prefix + "disabling USE flags that trigger " + \
7200                         "optional dependencies.\n", noiselevel=-1)
7201
7202         def _show_merge_list(self):
7203                 if self._serialized_tasks_cache is not None and \
7204                         not (self._displayed_list and \
7205                         (self._displayed_list == self._serialized_tasks_cache or \
7206                         self._displayed_list == \
7207                                 list(reversed(self._serialized_tasks_cache)))):
7208                         display_list = self._serialized_tasks_cache[:]
7209                         if "--tree" in self.myopts:
7210                                 display_list.reverse()
7211                         self.display(display_list)
7212
7213         def _show_unsatisfied_blockers(self, blockers):
7214                 self._show_merge_list()
7215                 msg = "Error: The above package list contains " + \
7216                         "packages which cannot be installed " + \
7217                         "at the same time on the same system."
7218                 prefix = colorize("BAD", " * ")
7219                 from textwrap import wrap
7220                 portage.writemsg("\n", noiselevel=-1)
7221                 for line in wrap(msg, 70):
7222                         portage.writemsg(prefix + line + "\n", noiselevel=-1)
7223
7224                 # Display the conflicting packages along with the packages
7225                 # that pulled them in. This is helpful for troubleshooting
7226                 # cases in which blockers don't solve automatically and
7227                 # the reasons are not apparent from the normal merge list
7228                 # display.
7229
7230                 conflict_pkgs = {}
7231                 for blocker in blockers:
7232                         for pkg in chain(self._blocked_pkgs.child_nodes(blocker), \
7233                                 self._blocker_parents.parent_nodes(blocker)):
7234                                 parent_atoms = self._parent_atoms.get(pkg)
7235                                 if not parent_atoms:
7236                                         atom = self._blocked_world_pkgs.get(pkg)
7237                                         if atom is not None:
7238                                                 parent_atoms = set([("@world", atom)])
7239                                 if parent_atoms:
7240                                         conflict_pkgs[pkg] = parent_atoms
7241
7242                 if conflict_pkgs:
7243                         # Reduce noise by pruning packages that are only
7244                         # pulled in by other conflict packages.
7245                         pruned_pkgs = set()
7246                         for pkg, parent_atoms in conflict_pkgs.iteritems():
7247                                 relevant_parent = False
7248                                 for parent, atom in parent_atoms:
7249                                         if parent not in conflict_pkgs:
7250                                                 relevant_parent = True
7251                                                 break
7252                                 if not relevant_parent:
7253                                         pruned_pkgs.add(pkg)
7254                         for pkg in pruned_pkgs:
7255                                 del conflict_pkgs[pkg]
7256
7257                 if conflict_pkgs:
7258                         msg = []
7259                         msg.append("\n")
7260                         indent = "  "
7261                         # Max number of parents shown, to avoid flooding the display.
7262                         max_parents = 3
7263                         for pkg, parent_atoms in conflict_pkgs.iteritems():
7264
7265                                 pruned_list = set()
7266
7267                                 # Prefer parents that are not directly involved in a conflict.
7268                                 for parent_atom in parent_atoms:
7269                                         if len(pruned_list) >= max_parents:
7270                                                 break
7271                                         parent, atom = parent_atom
7272                                         if parent not in conflict_pkgs:
7273                                                 pruned_list.add(parent_atom)
7274
7275                                 for parent_atom in parent_atoms:
7276                                         if len(pruned_list) >= max_parents:
7277                                                 break
7278                                         pruned_list.add(parent_atom)
7279
7280                                 omitted_parents = len(parent_atoms) - len(pruned_list)
7281                                 msg.append(indent + "%s pulled in by\n" % pkg)
7282
7283                                 for parent_atom in pruned_list:
7284                                         parent, atom = parent_atom
7285                                         msg.append(2*indent)
7286                                         if isinstance(parent,
7287                                                 (PackageArg, AtomArg)):
7288                                                 # For PackageArg and AtomArg types, it's
7289                                                 # redundant to display the atom attribute.
7290                                                 msg.append(str(parent))
7291                                         else:
7292                                                 # Display the specific atom from SetArg or
7293                                                 # Package types.
7294                                                 msg.append("%s required by %s" % (atom, parent))
7295                                         msg.append("\n")
7296
7297                                 if omitted_parents:
7298                                         msg.append(2*indent)
7299                                         msg.append("(and %d more)\n" % omitted_parents)
7300
7301                                 msg.append("\n")
7302
7303                         sys.stderr.write("".join(msg))
7304                         sys.stderr.flush()
7305
7306                 if "--quiet" not in self.myopts:
7307                         show_blocker_docs_link()
7308
7309         def display(self, mylist, favorites=[], verbosity=None):
7310
7311                 # This is used to prevent display_problems() from
7312                 # redundantly displaying this exact same merge list
7313                 # again via _show_merge_list().
7314                 self._displayed_list = mylist
7315
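                     # Verbosity: 1 for --quiet, 3 for --verbose, otherwise 2
                     # (the and/or chain below emulates a conditional expression).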
7316                 if verbosity is None:
7317                         verbosity = ("--quiet" in self.myopts and 1 or \
7318                                 "--verbose" in self.myopts and 3 or 2)
7319                 favorites_set = InternalPackageSet(favorites)
7320                 oneshot = "--oneshot" in self.myopts or \
7321                         "--onlydeps" in self.myopts
7322                 columns = "--columns" in self.myopts
7323                 changelogs=[]
7324                 p=[]
7325                 blockers = []
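                     # "p" accumulates formatted merge-list entries (strings or
                     # (myprint, verboseadd, repoadd) tuples), "blockers" collects
                     # unsatisfied blocker lines shown after them, and "changelogs"
                     # gathers ChangeLog excerpts when --changelog is enabled.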
7326
7327                 counters = PackageCounters()
7328
7329                 if verbosity == 1 and "--verbose" not in self.myopts:
7330                         def create_use_string(*args):
7331                                 return ""
7332                 else:
7333                         def create_use_string(name, cur_iuse, iuse_forced, cur_use,
7334                                 old_iuse, old_use,
7335                                 is_new, reinst_flags,
7336                                 all_flags=(verbosity == 3 or "--quiet" in self.myopts),
7337                                 alphabetical=("--alphabetical" in self.myopts)):
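                                     # Markers used below: "*" = the flag's state changed
                                     # relative to the installed version, "%" = the flag was
                                     # added to or removed from IUSE, and parentheses = the
                                     # flag is forced/masked (or no longer in IUSE).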
7338                                 enabled = []
7339                                 if alphabetical:
7340                                         disabled = enabled
7341                                         removed = enabled
7342                                 else:
7343                                         disabled = []
7344                                         removed = []
7345                                 cur_iuse = set(cur_iuse)
7346                                 enabled_flags = cur_iuse.intersection(cur_use)
7347                                 removed_iuse = set(old_iuse).difference(cur_iuse)
7348                                 any_iuse = cur_iuse.union(old_iuse)
7349                                 any_iuse = list(any_iuse)
7350                                 any_iuse.sort()
7351                                 for flag in any_iuse:
7352                                         flag_str = None
7353                                         isEnabled = False
7354                                         reinst_flag = reinst_flags and flag in reinst_flags
7355                                         if flag in enabled_flags:
7356                                                 isEnabled = True
7357                                                 if is_new or flag in old_use and \
7358                                                         (all_flags or reinst_flag):
7359                                                         flag_str = red(flag)
7360                                                 elif flag not in old_iuse:
7361                                                         flag_str = yellow(flag) + "%*"
7362                                                 elif flag not in old_use:
7363                                                         flag_str = green(flag) + "*"
7364                                         elif flag in removed_iuse:
7365                                                 if all_flags or reinst_flag:
7366                                                         flag_str = yellow("-" + flag) + "%"
7367                                                         if flag in old_use:
7368                                                                 flag_str += "*"
7369                                                         flag_str = "(" + flag_str + ")"
7370                                                         removed.append(flag_str)
7371                                                 continue
7372                                         else:
7373                                                 if is_new or flag in old_iuse and \
7374                                                         flag not in old_use and \
7375                                                         (all_flags or reinst_flag):
7376                                                         flag_str = blue("-" + flag)
7377                                                 elif flag not in old_iuse:
7378                                                         flag_str = yellow("-" + flag)
7379                                                         if flag not in iuse_forced:
7380                                                                 flag_str += "%"
7381                                                 elif flag in old_use:
7382                                                         flag_str = green("-" + flag) + "*"
7383                                         if flag_str:
7384                                                 if flag in iuse_forced:
7385                                                         flag_str = "(" + flag_str + ")"
7386                                                 if isEnabled:
7387                                                         enabled.append(flag_str)
7388                                                 else:
7389                                                         disabled.append(flag_str)
7390
7391                                 if alphabetical:
7392                                         ret = " ".join(enabled)
7393                                 else:
7394                                         ret = " ".join(enabled + disabled + removed)
7395                                 if ret:
7396                                         ret = '%s="%s" ' % (name, ret)
7397                                 return ret
7398
7399                 repo_display = RepoDisplay(self.roots)
7400
7401                 tree_nodes = []
7402                 display_list = []
7403                 mygraph = self.digraph.copy()
7404
7405                 # If there are any Uninstall instances, add the corresponding
7406                 # blockers to the digraph (useful for --tree display).
7407
7408                 executed_uninstalls = set(node for node in mylist \
7409                         if isinstance(node, Package) and node.operation == "unmerge")
7410
7411                 for uninstall in self._blocker_uninstalls.leaf_nodes():
7412                         uninstall_parents = \
7413                                 self._blocker_uninstalls.parent_nodes(uninstall)
7414                         if not uninstall_parents:
7415                                 continue
7416
7417                         # Remove the corresponding "nomerge" node and substitute
7418                         # the Uninstall node.
7419                         inst_pkg = self._pkg_cache[
7420                                 ("installed", uninstall.root, uninstall.cpv, "nomerge")]
7421                         try:
7422                                 mygraph.remove(inst_pkg)
7423                         except KeyError:
7424                                 pass
7425
7426                         try:
7427                                 inst_pkg_blockers = self._blocker_parents.child_nodes(inst_pkg)
7428                         except KeyError:
7429                                 inst_pkg_blockers = []
7430
7431                         # Break the Package -> Uninstall edges.
7432                         mygraph.remove(uninstall)
7433
7434                         # Resolution of a package's blockers
7435                         # depends on its own uninstallation.
7436                         for blocker in inst_pkg_blockers:
7437                                 mygraph.add(uninstall, blocker)
7438
7439                         # Expand Package -> Uninstall edges into
7440                         # Package -> Blocker -> Uninstall edges.
7441                         for blocker in uninstall_parents:
7442                                 mygraph.add(uninstall, blocker)
7443                                 for parent in self._blocker_parents.parent_nodes(blocker):
7444                                         if parent != inst_pkg:
7445                                                 mygraph.add(blocker, parent)
7446
7447                         # If the uninstall task did not need to be executed because
7448                         # of an upgrade, display Blocker -> Upgrade edges since the
7449                         # corresponding Blocker -> Uninstall edges will not be shown.
7450                         upgrade_node = \
7451                                 self._slot_pkg_map[uninstall.root].get(uninstall.slot_atom)
7452                         if upgrade_node is not None and \
7453                                 uninstall not in executed_uninstalls:
7454                                 for blocker in uninstall_parents:
7455                                         mygraph.add(upgrade_node, blocker)
7456
7457                 unsatisfied_blockers = []
7458                 i = 0
7459                 depth = 0
7460                 shown_edges = set()
7461                 for x in mylist:
7462                         if isinstance(x, Blocker) and not x.satisfied:
7463                                 unsatisfied_blockers.append(x)
7464                                 continue
7465                         graph_key = x
7466                         if "--tree" in self.myopts:
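                                     # Attach this node to the deepest entry of the current
                                     # tree branch that is a parent of it in the dependency
                                     # graph; if it attaches nowhere, synthesize a parent
                                     # chain for it via add_parents() below.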
7467                                 depth = len(tree_nodes)
7468                                 while depth and graph_key not in \
7469                                         mygraph.child_nodes(tree_nodes[depth-1]):
7470                                                 depth -= 1
7471                                 if depth:
7472                                         tree_nodes = tree_nodes[:depth]
7473                                         tree_nodes.append(graph_key)
7474                                         display_list.append((x, depth, True))
7475                                         shown_edges.add((graph_key, tree_nodes[depth-1]))
7476                                 else:
7477                                         traversed_nodes = set() # prevent endless cycles
7478                                         traversed_nodes.add(graph_key)
7479                                         def add_parents(current_node, ordered):
7480                                                 parent_nodes = None
7481                                                 # Do not traverse to parents if this node is
7482                                                 # an argument or a direct member of a set that has
7483                                                 # been specified as an argument (system or world).
7484                                                 if current_node not in self._set_nodes:
7485                                                         parent_nodes = mygraph.parent_nodes(current_node)
7486                                                 if parent_nodes:
7487                                                         child_nodes = set(mygraph.child_nodes(current_node))
7488                                                         selected_parent = None
7489                                                         # First, try to avoid a direct cycle.
7490                                                         for node in parent_nodes:
7491                                                                 if not isinstance(node, (Blocker, Package)):
7492                                                                         continue
7493                                                                 if node not in traversed_nodes and \
7494                                                                         node not in child_nodes:
7495                                                                         edge = (current_node, node)
7496                                                                         if edge in shown_edges:
7497                                                                                 continue
7498                                                                         selected_parent = node
7499                                                                         break
7500                                                         if not selected_parent:
7501                                                                 # A direct cycle is unavoidable.
7502                                                                 for node in parent_nodes:
7503                                                                         if not isinstance(node, (Blocker, Package)):
7504                                                                                 continue
7505                                                                         if node not in traversed_nodes:
7506                                                                                 edge = (current_node, node)
7507                                                                                 if edge in shown_edges:
7508                                                                                         continue
7509                                                                                 selected_parent = node
7510                                                                                 break
7511                                                         if selected_parent:
7512                                                                 shown_edges.add((current_node, selected_parent))
7513                                                                 traversed_nodes.add(selected_parent)
7514                                                                 add_parents(selected_parent, False)
7515                                                 display_list.append((current_node,
7516                                                         len(tree_nodes), ordered))
7517                                                 tree_nodes.append(current_node)
7518                                         tree_nodes = []
7519                                         add_parents(graph_key, True)
7520                         else:
7521                                 display_list.append((x, depth, True))
7522                 mylist = display_list
7523                 for x in unsatisfied_blockers:
7524                         mylist.append((x, 0, True))
7525
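                     # Walk the list backwards and prune redundant "nomerge" tree
                     # entries that do not help position any deeper merge entry.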
7526                 last_merge_depth = 0
7527                 for i in xrange(len(mylist)-1,-1,-1):
7528                         graph_key, depth, ordered = mylist[i]
7529                         if not ordered and depth == 0 and i > 0 \
7530                                 and graph_key == mylist[i-1][0] and \
7531                                 mylist[i-1][1] == 0:
7532                                 # An ordered node got a consecutive duplicate when the tree was
7533                                 # being filled in.
7534                                 del mylist[i]
7535                                 continue
7536                         if ordered and graph_key[-1] != "nomerge":
7537                                 last_merge_depth = depth
7538                                 continue
7539                         if depth >= last_merge_depth or \
7540                                 i < len(mylist) - 1 and \
7541                                 depth >= mylist[i+1][1]:
7542                                         del mylist[i]
7543
7544                 from portage import flatten
7545                 from portage.dep import use_reduce, paren_reduce
7546                 # List of files already counted for fetching - avoids counting
7547                 # the same file twice in the size display (verbose mode).
7548                 myfetchlist=[]
7549
7550                 # Use this set to detect when all the "repoadd" strings are "[0]"
7551                 # and disable the entire repo display in this case.
7552                 repoadd_set = set()
7553
7554                 for mylist_index in xrange(len(mylist)):
7555                         x, depth, ordered = mylist[mylist_index]
7556                         pkg_type = x[0]
7557                         myroot = x[1]
7558                         pkg_key = x[2]
7559                         portdb = self.trees[myroot]["porttree"].dbapi
7560                         bindb  = self.trees[myroot]["bintree"].dbapi
7561                         vardb = self.trees[myroot]["vartree"].dbapi
7562                         vartree = self.trees[myroot]["vartree"]
7563                         pkgsettings = self.pkgsettings[myroot]
7564
7565                         fetch=" "
7566                         indent = " " * depth
7567
7568                         if isinstance(x, Blocker):
7569                                 if x.satisfied:
7570                                         blocker_style = "PKG_BLOCKER_SATISFIED"
7571                                         addl = "%s  %s  " % (colorize(blocker_style, "b"), fetch)
7572                                 else:
7573                                         blocker_style = "PKG_BLOCKER"
7574                                         addl = "%s  %s  " % (colorize(blocker_style, "B"), fetch)
7575                                 if ordered:
7576                                         counters.blocks += 1
7577                                         if x.satisfied:
7578                                                 counters.blocks_satisfied += 1
7579                                 resolved = portage.key_expand(
7580                                         str(x.atom).lstrip("!"), mydb=vardb, settings=pkgsettings)
7581                                 if "--columns" in self.myopts and "--quiet" in self.myopts:
7582                                         addl += " " + colorize(blocker_style, resolved)
7583                                 else:
7584                                         addl = "[%s %s] %s%s" % \
7585                                                 (colorize(blocker_style, "blocks"),
7586                                                 addl, indent, colorize(blocker_style, resolved))
7587                                 block_parents = self._blocker_parents.parent_nodes(x)
7588                                 block_parents = set([pnode[2] for pnode in block_parents])
7589                                 block_parents = ", ".join(block_parents)
7590                                 if resolved!=x[2]:
7591                                         addl += colorize(blocker_style,
7592                                                 " (\"%s\" is blocking %s)") % \
7593                                                 (str(x.atom).lstrip("!"), block_parents)
7594                                 else:
7595                                         addl += colorize(blocker_style,
7596                                                 " (is blocking %s)") % block_parents
7597                                 if x.satisfied:
7598                                         if columns:
7599                                                 continue
7600                                         p.append(addl)
7601                                 else:
7602                                         blockers.append(addl)
7603                         else:
7604                                 pkg_status = x[3]
7605                                 pkg_merge = ordered and pkg_status == "merge"
7606                                 if not pkg_merge and pkg_status == "merge":
7607                                         pkg_status = "nomerge"
7608                                 built = pkg_type != "ebuild"
7609                                 installed = pkg_type == "installed"
7610                                 pkg = x
7611                                 metadata = pkg.metadata
7612                                 ebuild_path = None
7613                                 repo_name = metadata["repository"]
7614                                 if pkg_type == "ebuild":
7615                                         ebuild_path = portdb.findname(pkg_key)
7616                                         if not ebuild_path: # shouldn't happen
7617                                                 raise portage.exception.PackageNotFound(pkg_key)
7618                                         repo_path_real = os.path.dirname(os.path.dirname(
7619                                                 os.path.dirname(ebuild_path)))
7620                                 else:
7621                                         repo_path_real = portdb.getRepositoryPath(repo_name)
7622                                 pkg_use = list(pkg.use.enabled)
7623                                 try:
7624                                         restrict = flatten(use_reduce(paren_reduce(
7625                                                 pkg.metadata["RESTRICT"]), uselist=pkg_use))
7626                                 except portage.exception.InvalidDependString, e:
7627                                         if not pkg.installed:
7628                                                 show_invalid_depstring_notice(x,
7629                                                         pkg.metadata["RESTRICT"], str(e))
7630                                                 del e
7631                                                 return 1
7632                                         restrict = []
7633                                 if "ebuild" == pkg_type and x[3] != "nomerge" and \
7634                                         "fetch" in restrict:
7635                                         fetch = red("F")
7636                                         if ordered:
7637                                                 counters.restrict_fetch += 1
7638                                         if portdb.fetch_check(pkg_key, pkg_use):
7639                                                 fetch = green("f")
7640                                                 if ordered:
7641                                                         counters.restrict_fetch_satisfied += 1
7642
7643                                 # We need to use "--emptytree" testing here rather than "empty" param testing because "empty"
7644                                 # param is used for -u, where you still *do* want to see when something is being upgraded.
7645                                 myoldbest = []
7646                                 myinslotlist = None
7647                                 installed_versions = vardb.match(portage.cpv_getkey(pkg_key))
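                                     # Status letters assigned below: "R" = reinstall of the
                                     # installed version, "U" = update within the slot ("UD"
                                     # = downgrade), "NS" = new slot, "N" = new package.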
7648                                 if vardb.cpv_exists(pkg_key):
7649                                         addl="  "+yellow("R")+fetch+"  "
7650                                         if ordered:
7651                                                 if pkg_merge:
7652                                                         counters.reinst += 1
7653                                                 elif pkg_status == "uninstall":
7654                                                         counters.uninst += 1
7655                                 # filter out old-style virtual matches
7656                                 elif installed_versions and \
7657                                         portage.cpv_getkey(installed_versions[0]) == \
7658                                         portage.cpv_getkey(pkg_key):
7659                                         myinslotlist = vardb.match(pkg.slot_atom)
7660                                         # If this is the first install of a new-style virtual, we
7661                                         # need to filter out old-style virtual matches.
7662                                         if myinslotlist and \
7663                                                 portage.cpv_getkey(myinslotlist[0]) != \
7664                                                 portage.cpv_getkey(pkg_key):
7665                                                 myinslotlist = None
7666                                         if myinslotlist:
7667                                                 myoldbest = myinslotlist[:]
7668                                                 addl = "   " + fetch
7669                                                 if not portage.dep.cpvequal(pkg_key,
7670                                                         portage.best([pkg_key] + myoldbest)):
7671                                                         # Downgrade in slot
7672                                                         addl += turquoise("U")+blue("D")
7673                                                         if ordered:
7674                                                                 counters.downgrades += 1
7675                                                 else:
7676                                                         # Update in slot
7677                                                         addl += turquoise("U") + " "
7678                                                         if ordered:
7679                                                                 counters.upgrades += 1
7680                                         else:
7681                                                 # New slot, mark it new.
7682                                                 addl = " " + green("NS") + fetch + "  "
7683                                                 myoldbest = vardb.match(portage.cpv_getkey(pkg_key))
7684                                                 if ordered:
7685                                                         counters.newslot += 1
7686
7687                                         if "--changelog" in self.myopts:
7688                                                 inst_matches = vardb.match(pkg.slot_atom)
7689                                                 if inst_matches:
7690                                                         changelogs.extend(self.calc_changelog(
7691                                                                 portdb.findname(pkg_key),
7692                                                                 inst_matches[0], pkg_key))
7693                                 else:
7694                                         addl = " " + green("N") + " " + fetch + "  "
7695                                         if ordered:
7696                                                 counters.new += 1
7697
7698                                 verboseadd = ""
7699                                 repoadd = None
7700
7701                                 if True:
7702                                         # USE flag display
7703                                         forced_flags = set()
7704                                         pkgsettings.setcpv(pkg) # for package.use.{mask,force}
7705                                         forced_flags.update(pkgsettings.useforce)
7706                                         forced_flags.update(pkgsettings.usemask)
7707
7708                                         cur_use = [flag for flag in pkg.use.enabled \
7709                                                 if flag in pkg.iuse.all]
7710                                         cur_iuse = sorted(pkg.iuse.all)
7711
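                                             # Compare against the installed version in the same
                                             # slot (if any) so USE changes can be highlighted;
                                             # otherwise treat this as a new install with no
                                             # previous USE state.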
7712                                         if myoldbest and myinslotlist:
7713                                                 previous_cpv = myoldbest[0]
7714                                         else:
7715                                                 previous_cpv = pkg.cpv
7716                                         if vardb.cpv_exists(previous_cpv):
7717                                                 old_iuse, old_use = vardb.aux_get(
7718                                                                 previous_cpv, ["IUSE", "USE"])
7719                                                 old_iuse = list(set(
7720                                                         filter_iuse_defaults(old_iuse.split())))
7721                                                 old_iuse.sort()
7722                                                 old_use = old_use.split()
7723                                                 is_new = False
7724                                         else:
7725                                                 old_iuse = []
7726                                                 old_use = []
7727                                                 is_new = True
7728
7729                                         old_use = [flag for flag in old_use if flag in old_iuse]
7730
7731                                         use_expand = pkgsettings["USE_EXPAND"].lower().split()
7732                                         use_expand.sort()
7733                                         use_expand.reverse()
7734                                         use_expand_hidden = \
7735                                                 pkgsettings["USE_EXPAND_HIDDEN"].lower().split()
7736
7737                                         def map_to_use_expand(myvals, forcedFlags=False,
7738                                                 removeHidden=True):
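                                                     # Split a flat flag list into per-USE_EXPAND lists,
                                                     # stripping the "<name>_" prefix (leftovers stay under
                                                     # "USE"); optionally return forced values and drop
                                                     # hidden groups.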
7739                                                 ret = {}
7740                                                 forced = {}
7741                                                 for exp in use_expand:
7742                                                         ret[exp] = []
7743                                                         forced[exp] = set()
7744                                                         for val in myvals[:]:
7745                                                                 if val.startswith(exp.lower()+"_"):
7746                                                                         if val in forced_flags:
7747                                                                                 forced[exp].add(val[len(exp)+1:])
7748                                                                         ret[exp].append(val[len(exp)+1:])
7749                                                                         myvals.remove(val)
7750                                                 ret["USE"] = myvals
7751                                                 forced["USE"] = [val for val in myvals \
7752                                                         if val in forced_flags]
7753                                                 if removeHidden:
7754                                                         for exp in use_expand_hidden:
7755                                                                 ret.pop(exp, None)
7756                                                 if forcedFlags:
7757                                                         return ret, forced
7758                                                 return ret
7759
7760                                         # Prevent USE_EXPAND_HIDDEN flags from being hidden if they
7761                                         # are the only thing that triggered reinstallation.
7762                                         reinst_flags_map = {}
7763                                         reinstall_for_flags = self._reinstall_nodes.get(pkg)
7764                                         reinst_expand_map = None
7765                                         if reinstall_for_flags:
7766                                                 reinst_flags_map = map_to_use_expand(
7767                                                         list(reinstall_for_flags), removeHidden=False)
7768                                                 for k in list(reinst_flags_map):
7769                                                         if not reinst_flags_map[k]:
7770                                                                 del reinst_flags_map[k]
7771                                                 if not reinst_flags_map.get("USE"):
7772                                                         reinst_expand_map = reinst_flags_map.copy()
7773                                                         reinst_expand_map.pop("USE", None)
7774                                         if reinst_expand_map and \
7775                                                 not set(reinst_expand_map).difference(
7776                                                 use_expand_hidden):
7777                                                 use_expand_hidden = \
7778                                                         set(use_expand_hidden).difference(
7779                                                         reinst_expand_map)
7780
7781                                         cur_iuse_map, iuse_forced = \
7782                                                 map_to_use_expand(cur_iuse, forcedFlags=True)
7783                                         cur_use_map = map_to_use_expand(cur_use)
7784                                         old_iuse_map = map_to_use_expand(old_iuse)
7785                                         old_use_map = map_to_use_expand(old_use)
7786
7787                                         use_expand.sort()
7788                                         use_expand.insert(0, "USE")
7789
7790                                         for key in use_expand:
7791                                                 if key in use_expand_hidden:
7792                                                         continue
7793                                                 verboseadd += create_use_string(key.upper(),
7794                                                         cur_iuse_map[key], iuse_forced[key],
7795                                                         cur_use_map[key], old_iuse_map[key],
7796                                                         old_use_map[key], is_new,
7797                                                         reinst_flags_map.get(key))
7798
7799                                 if verbosity == 3:
7800                                         # size verbose
7801                                         mysize=0
7802                                         if pkg_type == "ebuild" and pkg_merge:
7803                                                 try:
7804                                                         myfilesdict = portdb.getfetchsizes(pkg_key,
7805                                                                 useflags=pkg_use, debug=self.edebug)
7806                                                 except portage.exception.InvalidDependString, e:
7807                                                         src_uri = portdb.aux_get(pkg_key, ["SRC_URI"])[0]
7808                                                         show_invalid_depstring_notice(x, src_uri, str(e))
7809                                                         del e
7810                                                         return 1
7811                                                 if myfilesdict is None:
7812                                                         myfilesdict="[empty/missing/bad digest]"
7813                                                 else:
7814                                                         for myfetchfile in myfilesdict:
7815                                                                 if myfetchfile not in myfetchlist:
7816                                                                         mysize+=myfilesdict[myfetchfile]
7817                                                                         myfetchlist.append(myfetchfile)
7818                                                         if ordered:
7819                                                                 counters.totalsize += mysize
7820                                                 verboseadd += format_size(mysize)
7821
7822                                         # overlay verbose
7823                                         # assign index for a previous version in the same slot
7824                                         has_previous = False
7825                                         repo_name_prev = None
7826                                         slot_atom = "%s:%s" % (portage.dep_getkey(pkg_key),
7827                                                 metadata["SLOT"])
7828                                         slot_matches = vardb.match(slot_atom)
7829                                         if slot_matches:
7830                                                 has_previous = True
7831                                                 repo_name_prev = vardb.aux_get(slot_matches[0],
7832                                                         ["repository"])[0]
7833
7834                                         # now use the data to generate output
7835                                         if pkg.installed or not has_previous:
7836                                                 repoadd = repo_display.repoStr(repo_path_real)
7837                                         else:
7838                                                 repo_path_prev = None
7839                                                 if repo_name_prev:
7840                                                         repo_path_prev = portdb.getRepositoryPath(
7841                                                                 repo_name_prev)
7842                                                 if repo_path_prev == repo_path_real:
7843                                                         repoadd = repo_display.repoStr(repo_path_real)
7844                                                 else:
7845                                                         repoadd = "%s=>%s" % (
7846                                                                 repo_display.repoStr(repo_path_prev),
7847                                                                 repo_display.repoStr(repo_path_real))
7848                                         if repoadd:
7849                                                 repoadd_set.add(repoadd)
7850
7851                                 xs = [portage.cpv_getkey(pkg_key)] + \
7852                                         list(portage.catpkgsplit(pkg_key)[2:])
7853                                 if xs[2] == "r0":
7854                                         xs[2] = ""
7855                                 else:
7856                                         xs[2] = "-" + xs[2]
7857
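                                     # Column layout for --columns output: COLUMNWIDTH
                                     # (default 130) sets the overall width; the name field
                                     # is padded to "newlp" before the new version and to
                                     # "oldlp" before the list of installed versions.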
7858                                 mywidth = 130
7859                                 if "COLUMNWIDTH" in self.settings:
7860                                         try:
7861                                                 mywidth = int(self.settings["COLUMNWIDTH"])
7862                                         except ValueError, e:
7863                                                 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
7864                                                 portage.writemsg(
7865                                                         "!!! Unable to parse COLUMNWIDTH='%s'\n" % \
7866                                                         self.settings["COLUMNWIDTH"], noiselevel=-1)
7867                                                 del e
7868                                 oldlp = mywidth - 30
7869                                 newlp = oldlp - 30
7870
7871                                 # Convert myoldbest from a list to a string.
7872                                 if not myoldbest:
7873                                         myoldbest = ""
7874                                 else:
7875                                         for pos, key in enumerate(myoldbest):
7876                                                 key = portage.catpkgsplit(key)[2] + \
7877                                                         "-" + portage.catpkgsplit(key)[3]
7878                                                 if key[-3:] == "-r0":
7879                                                         key = key[:-3]
7880                                                 myoldbest[pos] = key
7881                                         myoldbest = blue("["+", ".join(myoldbest)+"]")
7882
7883                                 pkg_cp = xs[0]
7884                                 root_config = self.roots[myroot]
7885                                 system_set = root_config.sets["system"]
7886                                 world_set  = root_config.sets["world"]
7887
7888                                 pkg_system = False
7889                                 pkg_world = False
7890                                 try:
7891                                         pkg_system = system_set.findAtomForPackage(pkg)
7892                                         pkg_world  = world_set.findAtomForPackage(pkg)
7893                                         if not (oneshot or pkg_world) and \
7894                                                 myroot == self.target_root and \
7895                                                 favorites_set.findAtomForPackage(pkg):
7896                                                 # Maybe it will be added to world now.
7897                                                 if create_world_atom(pkg, favorites_set, root_config):
7898                                                         pkg_world = True
7899                                 except portage.exception.InvalidDependString:
7900                                         # This is reported elsewhere if relevant.
7901                                         pass
7902
7903                                 def pkgprint(pkg_str):
7904                                         if pkg_merge:
7905                                                 if pkg_system:
7906                                                         return colorize("PKG_MERGE_SYSTEM", pkg_str)
7907                                                 elif pkg_world:
7908                                                         return colorize("PKG_MERGE_WORLD", pkg_str)
7909                                                 else:
7910                                                         return colorize("PKG_MERGE", pkg_str)
7911                                         elif pkg_status == "uninstall":
7912                                                 return colorize("PKG_UNINSTALL", pkg_str)
7913                                         else:
7914                                                 if pkg_system:
7915                                                         return colorize("PKG_NOMERGE_SYSTEM", pkg_str)
7916                                                 elif pkg_world:
7917                                                         return colorize("PKG_NOMERGE_WORLD", pkg_str)
7918                                                 else:
7919                                                         return colorize("PKG_NOMERGE", pkg_str)
7920
7921                                 try:
7922                                         properties = flatten(use_reduce(paren_reduce(
7923                                                 pkg.metadata["PROPERTIES"]), uselist=pkg.use.enabled))
7924                                 except portage.exception.InvalidDependString, e:
7925                                         if not pkg.installed:
7926                                                 show_invalid_depstring_notice(pkg,
7927                                                         pkg.metadata["PROPERTIES"], str(e))
7928                                                 del e
7929                                                 return 1
7930                                         properties = []
7931                                 interactive = "interactive" in properties
7932                                 if interactive and pkg.operation == "merge":
7933                                         addl = colorize("WARN", "I") + addl[1:]
7934                                         if ordered:
7935                                                 counters.interactive += 1
7936
7937                                 if x[1]!="/":
7938                                         if myoldbest:
7939                                                 myoldbest +=" "
7940                                         if "--columns" in self.myopts:
7941                                                 if "--quiet" in self.myopts:
7942                                                         myprint=addl+" "+indent+pkgprint(pkg_cp)
7943                                                         myprint=myprint+darkblue(" "+xs[1]+xs[2])+" "
7944                                                         myprint=myprint+myoldbest
7945                                                         myprint=myprint+darkgreen("to "+x[1])
7946                                                         verboseadd = None
7947                                                 else:
7948                                                         if not pkg_merge:
7949                                                                 myprint = "[%s] %s%s" % \
7950                                                                         (pkgprint(pkg_status.ljust(13)),
7951                                                                         indent, pkgprint(pkg.cp))
7952                                                         else:
7953                                                                 myprint = "[%s %s] %s%s" % \
7954                                                                         (pkgprint(pkg.type_name), addl,
7955                                                                         indent, pkgprint(pkg.cp))
7956                                                         if (newlp-nc_len(myprint)) > 0:
7957                                                                 myprint=myprint+(" "*(newlp-nc_len(myprint)))
7958                                                         myprint=myprint+"["+darkblue(xs[1]+xs[2])+"] "
7959                                                         if (oldlp-nc_len(myprint)) > 0:
7960                                                                 myprint=myprint+" "*(oldlp-nc_len(myprint))
7961                                                         myprint=myprint+myoldbest
7962                                                         myprint += darkgreen("to " + pkg.root)
7963                                         else:
7964                                                 if not pkg_merge:
7965                                                         myprint = "[%s] " % pkgprint(pkg_status.ljust(13))
7966                                                 else:
7967                                                         myprint = "[" + pkg_type + " " + addl + "] "
7968                                                 myprint += indent + pkgprint(pkg_key) + " " + \
7969                                                         myoldbest + darkgreen("to " + myroot)
7970                                 else:
7971                                         if "--columns" in self.myopts:
7972                                                 if "--quiet" in self.myopts:
7973                                                         myprint=addl+" "+indent+pkgprint(pkg_cp)
7974                                                         myprint=myprint+" "+green(xs[1]+xs[2])+" "
7975                                                         myprint=myprint+myoldbest
7976                                                         verboseadd = None
7977                                                 else:
7978                                                         if not pkg_merge:
7979                                                                 myprint = "[%s] %s%s" % \
7980                                                                         (pkgprint(pkg_status.ljust(13)),
7981                                                                         indent, pkgprint(pkg.cp))
7982                                                         else:
7983                                                                 myprint = "[%s %s] %s%s" % \
7984                                                                         (pkgprint(pkg.type_name), addl,
7985                                                                         indent, pkgprint(pkg.cp))
7986                                                         if (newlp-nc_len(myprint)) > 0:
7987                                                                 myprint=myprint+(" "*(newlp-nc_len(myprint)))
7988                                                         myprint=myprint+green(" ["+xs[1]+xs[2]+"] ")
7989                                                         if (oldlp-nc_len(myprint)) > 0:
7990                                                                 myprint=myprint+(" "*(oldlp-nc_len(myprint)))
7991                                                         myprint += myoldbest
7992                                         else:
7993                                                 if not pkg_merge:
7994                                                         myprint = "[%s] %s%s %s" % \
7995                                                                 (pkgprint(pkg_status.ljust(13)),
7996                                                                 indent, pkgprint(pkg.cpv),
7997                                                                 myoldbest)
7998                                                 else:
7999                                                         myprint = "[%s %s] %s%s %s" % \
8000                                                                 (pkgprint(pkg_type), addl, indent,
8001                                                                 pkgprint(pkg.cpv), myoldbest)
8002
8003                                 if columns and pkg.operation == "uninstall":
8004                                         continue
8005                                 p.append((myprint, verboseadd, repoadd))
8006
8007                                 if "--tree" not in self.myopts and \
8008                                         "--quiet" not in self.myopts and \
8009                                         not self._opts_no_restart.intersection(self.myopts) and \
8010                                         pkg.root == self._running_root.root and \
8011                                         portage.match_from_list(
8012                                         portage.const.PORTAGE_PACKAGE_ATOM, [pkg]) and \
8013                                         not vardb.cpv_exists(pkg.cpv) and \
8014                                         "--quiet" not in self.myopts:
8015                                                 if mylist_index < len(mylist) - 1:
8016                                                         p.append(colorize("WARN", "*** Portage will stop merging at this point and reload itself,"))
8017                                                         p.append(colorize("WARN", "    then resume the merge."))
8018
8019                 out = sys.stdout
8020                 show_repos = repoadd_set and repoadd_set != set(["0"])
8021
8022                 for x in p:
8023                         if isinstance(x, basestring):
8024                                 out.write("%s\n" % (x,))
8025                                 continue
8026
8027                         myprint, verboseadd, repoadd = x
8028
8029                         if verboseadd:
8030                                 myprint += " " + verboseadd
8031
8032                         if show_repos and repoadd:
8033                                 myprint += " " + teal("[%s]" % repoadd)
8034
8035                         out.write("%s\n" % (myprint,))
8036
8037                 for x in blockers:
8038                         print x
8039
8040                 if verbosity == 3:
8041                         print
8042                         print counters
8043                         if show_repos:
8044                                 sys.stdout.write(str(repo_display))
8045
8046                 if "--changelog" in self.myopts:
8047                         print
8048                         for revision,text in changelogs:
8049                                 print bold('*'+revision)
8050                                 sys.stdout.write(text)
8051
8052                 sys.stdout.flush()
8053                 return os.EX_OK
8054
8055         def display_problems(self):
8056                 """
8057                 Display problems with the dependency graph such as slot collisions.
8058                 This is called internally by display() to show the problems _after_
8059                 the merge list where it is most likely to be seen, but if display()
8060                 is not going to be called then this method should be called explicitly
8061                 to ensure that the user is notified of problems with the graph.
8062
8063                 All output goes to stderr, except for unsatisfied dependencies which
8064                 go to stdout for parsing by programs such as autounmask.
8065                 """
8066
8067                                 # Note that show_masked_packages() sends its output to
8068                 # stdout, and some programs such as autounmask parse the
8069                 # output in cases when emerge bails out. However, when
8070                 # show_masked_packages() is called for installed packages
8071                 # here, the message is a warning that is more appropriate
8072                 # to send to stderr, so temporarily redirect stdout to
8073                 # stderr. TODO: Fix output code so there's a cleaner way
8074                 # to redirect everything to stderr.
8075                 sys.stdout.flush()
8076                 sys.stderr.flush()
8077                 stdout = sys.stdout
8078                 try:
8079                         sys.stdout = sys.stderr
8080                         self._display_problems()
8081                 finally:
8082                         sys.stdout = stdout
8083                         sys.stdout.flush()
8084                         sys.stderr.flush()
8085
8086                 # This goes to stdout for parsing by programs like autounmask.
8087                 for pargs, kwargs in self._unsatisfied_deps_for_display:
8088                         self._show_unsatisfied_dep(*pargs, **kwargs)
8089
8090         def _display_problems(self):
8091                 if self._circular_deps_for_display is not None:
8092                         self._show_circular_deps(
8093                                 self._circular_deps_for_display)
8094
8095                 # The user is only notified of a slot conflict if
8096                 # there are no unresolvable blocker conflicts.
8097                 if self._unsatisfied_blockers_for_display is not None:
8098                         self._show_unsatisfied_blockers(
8099                                 self._unsatisfied_blockers_for_display)
8100                 else:
8101                         self._show_slot_collision_notice()
8102
8103                 # TODO: Add generic support for "set problem" handlers so that
8104                 # the below warnings aren't special cases for world only.
8105
8106                 if self._missing_args:
8107                         world_problems = False
8108                         if "world" in self._sets:
8109                                 # Filter out indirect members of world (from nested sets)
8110                                 # since only direct members of world are desired here.
8111                                 world_set = self.roots[self.target_root].sets["world"]
8112                                 for arg, atom in self._missing_args:
8113                                         if arg.name == "world" and atom in world_set:
8114                                                 world_problems = True
8115                                                 break
8116
8117                         if world_problems:
8118                                 sys.stderr.write("\n!!! Problems have been " + \
8119                                         "detected with your world file\n")
8120                                 sys.stderr.write("!!! Please run " + \
8121                                         green("emaint --check world")+"\n\n")
8122
8123                 if self._missing_args:
8124                         sys.stderr.write("\n" + colorize("BAD", "!!!") + \
8125                                 " Ebuilds for the following packages are either all\n")
8126                         sys.stderr.write(colorize("BAD", "!!!") + \
8127                                 " masked or don't exist:\n")
8128                         sys.stderr.write(" ".join(str(atom) for arg, atom in \
8129                                 self._missing_args) + "\n")
8130
8131                 if self._pprovided_args:
8132                         arg_refs = {}
8133                         for arg, atom in self._pprovided_args:
8134                                 if isinstance(arg, SetArg):
8135                                         parent = arg.name
8136                                         arg_atom = (atom, atom)
8137                                 else:
8138                                         parent = "args"
8139                                         arg_atom = (arg.arg, atom)
8140                                 refs = arg_refs.setdefault(arg_atom, [])
8141                                 if parent not in refs:
8142                                         refs.append(parent)
8143                         msg = []
8144                         msg.append(bad("\nWARNING: "))
8145                         if len(self._pprovided_args) > 1:
8146                                 msg.append("Requested packages will not be " + \
8147                                         "merged because they are listed in\n")
8148                         else:
8149                                 msg.append("A requested package will not be " + \
8150                                         "merged because it is listed in\n")
8151                         msg.append("package.provided:\n\n")
8152                         problems_sets = set()
8153                         for (arg, atom), refs in arg_refs.iteritems():
8154                                 ref_string = ""
8155                                 if refs:
8156                                         problems_sets.update(refs)
8157                                         refs.sort()
8158                                         ref_string = ", ".join(["'%s'" % name for name in refs])
8159                                         ref_string = " pulled in by " + ref_string
8160                                 msg.append("  %s%s\n" % (colorize("INFORM", str(arg)), ref_string))
8161                         msg.append("\n")
8162                         if "world" in problems_sets:
8163                                 msg.append("This problem can be solved in one of the following ways:\n\n")
8164                                 msg.append("  A) Use emaint to clean offending packages from world (if not installed).\n")
8165                                 msg.append("  B) Uninstall offending packages (cleans them from world).\n")
8166                                 msg.append("  C) Remove offending entries from package.provided.\n\n")
8167                                 msg.append("The best course of action depends on the reason that an offending\n")
8168                                 msg.append("package.provided entry exists.\n\n")
8169                         sys.stderr.write("".join(msg))
8170
8171                 masked_packages = []
8172                 for pkg in self._masked_installed:
8173                         root_config = pkg.root_config
8174                         pkgsettings = self.pkgsettings[pkg.root]
8175                         mreasons = get_masking_status(pkg, pkgsettings, root_config)
8176                         masked_packages.append((root_config, pkgsettings,
8177                                 pkg.cpv, pkg.metadata, mreasons))
8178                 if masked_packages:
8179                         sys.stderr.write("\n" + colorize("BAD", "!!!") + \
8180                                 " The following installed packages are masked:\n")
8181                         show_masked_packages(masked_packages)
8182                         show_mask_docs()
8183                         print
8184
8185         def calc_changelog(self,ebuildpath,current,next):
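                     # Returns the ChangeLog entries between the installed version (current)
                     # and the version about to be merged (next), newest first, or [] if the
                     # ebuild/ChangeLog is missing or the current version cannot be found.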
8186                 if ebuildpath is None or not os.path.exists(ebuildpath):
8187                         return []
8188                 current = '-'.join(portage.catpkgsplit(current)[1:])
8189                 if current.endswith('-r0'):
8190                         current = current[:-3]
8191                 next = '-'.join(portage.catpkgsplit(next)[1:])
8192                 if next.endswith('-r0'):
8193                         next = next[:-3]
8194                 changelogpath = os.path.join(os.path.split(ebuildpath)[0],'ChangeLog')
8195                 try:
8196                         changelog = open(changelogpath).read()
8197                 except SystemExit, e:
8198                         raise # Needed else can't exit
8199                 except:
8200                         return []
8201                 divisions = self.find_changelog_tags(changelog)
8202                 #print 'XX from',current,'to',next
8203                 #for div,text in divisions: print 'XX',div
8204                 # skip entries for all revisions above the one we are about to emerge
8205                 for i in range(len(divisions)):
8206                         if divisions[i][0]==next:
8207                                 divisions = divisions[i:]
8208                                 break
8209                 # find out how many entries we are going to display
8210                 for i in range(len(divisions)):
8211                         if divisions[i][0]==current:
8212                                 divisions = divisions[:i]
8213                                 break
8214                 else:
8215                         # couldn't find the current revision in the list; display nothing
8216                         return []
8217                 return divisions
8218
8219         def find_changelog_tags(self,changelog):
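                     # Split a ChangeLog into (release, text) tuples by scanning for
                     # "* <version>" header lines; trailing ".ebuild" and "-r0" suffixes
                     # are stripped from each release string.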
8220                 divs = []
8221                 release = None
8222                 while 1:
8223                         match = re.search(r'^\*\ ?([-a-zA-Z0-9_.+]*)(?:\ .*)?\n',changelog,re.M)
8224                         if match is None:
8225                                 if release is not None:
8226                                         divs.append((release,changelog))
8227                                 return divs
8228                         if release is not None:
8229                                 divs.append((release,changelog[:match.start()]))
8230                         changelog = changelog[match.end():]
8231                         release = match.group(1)
8232                         if release.endswith('.ebuild'):
8233                                 release = release[:-7]
8234                         if release.endswith('-r0'):
8235                                 release = release[:-3]
8236
8237         def saveNomergeFavorites(self):
8238                 """Find atoms in favorites that are not in the mergelist and add them
8239                 to the world file if necessary."""
8240                 for x in ("--buildpkgonly", "--fetchonly", "--fetch-all-uri",
8241                         "--oneshot", "--onlydeps", "--pretend"):
8242                         if x in self.myopts:
8243                                 return
8244                 root_config = self.roots[self.target_root]
8245                 world_set = root_config.sets["world"]
8246
8247                 world_locked = False
8248                 if hasattr(world_set, "lock"):
8249                         world_set.lock()
8250                         world_locked = True
8251
8252                 if hasattr(world_set, "load"):
8253                         world_set.load() # maybe it's changed on disk
8254
8255                 args_set = self._sets["args"]
8256                 portdb = self.trees[self.target_root]["porttree"].dbapi
8257                 added_favorites = set()
8258                 for x in self._set_nodes:
8259                         pkg_type, root, pkg_key, pkg_status = x
8260                         if pkg_status != "nomerge":
8261                                 continue
8262
8263                         try:
8264                                 myfavkey = create_world_atom(x, args_set, root_config)
8265                                 if myfavkey:
8266                                         if myfavkey in added_favorites:
8267                                                 continue
8268                                         added_favorites.add(myfavkey)
8269                         except portage.exception.InvalidDependString, e:
8270                                 writemsg("\n\n!!! '%s' has invalid PROVIDE: %s\n" % \
8271                                         (pkg_key, str(e)), noiselevel=-1)
8272                                 writemsg("!!! see '%s'\n\n" % os.path.join(
8273                                         root, portage.VDB_PATH, pkg_key, "PROVIDE"), noiselevel=-1)
8274                                 del e
8275                 all_added = []
8276                 for k in self._sets:
8277                         if k in ("args", "world") or not root_config.sets[k].world_candidate:
8278                                 continue
8279                         s = SETPREFIX + k
8280                         if s in world_set:
8281                                 continue
8282                         all_added.append(SETPREFIX + k)
8283                 all_added.extend(added_favorites)
8284                 all_added.sort()
8285                 for a in all_added:
8286                         print ">>> Recording %s in \"world\" favorites file..." % \
8287                                 colorize("INFORM", str(a))
8288                 if all_added:
8289                         world_set.update(all_added)
8290
8291                 if world_locked:
8292                         world_set.unlock()
8293
8294         def loadResumeCommand(self, resume_data, skip_masked=False):
8295                 """
8296                 Add a resume command to the graph and validate it in the process.  This
8297                 will raise a PackageNotFound exception if a package is not available.
8298                 """
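                     # A sketch of the resume_data layout handled below (field names
                     # inferred from this method, values illustrative):
                     #   resume_data = {
                     #       "mergelist": [[pkg_type, root, cpv, action], ...],
                     #       "favorites": [atom_or_set_name, ...],
                     #   }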
8299
8300                 if not isinstance(resume_data, dict):
8301                         return False
8302
8303                 mergelist = resume_data.get("mergelist")
8304                 if not isinstance(mergelist, list):
8305                         mergelist = []
8306
8307                 fakedb = self.mydbapi
8308                 trees = self.trees
8309                 serialized_tasks = []
8310                 masked_tasks = []
8311                 for x in mergelist:
8312                         if not (isinstance(x, list) and len(x) == 4):
8313                                 continue
8314                         pkg_type, myroot, pkg_key, action = x
8315                         if pkg_type not in self.pkg_tree_map:
8316                                 continue
8317                         if action != "merge":
8318                                 continue
8319                         tree_type = self.pkg_tree_map[pkg_type]
8320                         mydb = trees[myroot][tree_type].dbapi
8321                         db_keys = list(self._trees_orig[myroot][
8322                                 tree_type].dbapi._aux_cache_keys)
8323                         try:
8324                                 metadata = izip(db_keys, mydb.aux_get(pkg_key, db_keys))
8325                         except KeyError:
8326                                 # It does not exist or it is corrupt.
8327                                 if action == "uninstall":
8328                                         continue
8329                                 raise portage.exception.PackageNotFound(pkg_key)
8330                         installed = action == "uninstall"
8331                         built = pkg_type != "ebuild"
8332                         root_config = self.roots[myroot]
8333                         pkg = Package(built=built, cpv=pkg_key,
8334                                 installed=installed, metadata=metadata,
8335                                 operation=action, root_config=root_config,
8336                                 type_name=pkg_type)
8337                         if pkg_type == "ebuild":
8338                                 pkgsettings = self.pkgsettings[myroot]
8339                                 pkgsettings.setcpv(pkg)
8340                                 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
8341                         self._pkg_cache[pkg] = pkg
8342
8343                         root_config = self.roots[pkg.root]
8344                         if "merge" == pkg.operation and \
8345                                 not visible(root_config.settings, pkg):
8346                                 if skip_masked:
8347                                         masked_tasks.append(Dependency(root=pkg.root, parent=pkg))
8348                                 else:
8349                                         self._unsatisfied_deps_for_display.append(
8350                                                 ((pkg.root, "="+pkg.cpv), {"myparent":None}))
8351
8352                         fakedb[myroot].cpv_inject(pkg)
8353                         serialized_tasks.append(pkg)
8354                         self.spinner.update()
8355
8356                 if self._unsatisfied_deps_for_display:
8357                         return False
8358
8359                 if not serialized_tasks or "--nodeps" in self.myopts:
8360                         self._serialized_tasks_cache = serialized_tasks
8361                         self._scheduler_graph = self.digraph
8362                 else:
8363                         self._select_package = self._select_pkg_from_graph
8364                         self.myparams.add("selective")
8365
8366                         favorites = resume_data.get("favorites")
8367                         args_set = self._sets["args"]
8368                         if isinstance(favorites, list):
8369                                 args = self._load_favorites(favorites)
8370                         else:
8371                                 args = []
8372
8373                         for task in serialized_tasks:
8374                                 if isinstance(task, Package) and \
8375                                         task.operation == "merge":
8376                                         if not self._add_pkg(task, None):
8377                                                 return False
8378
8379                         # Packages for argument atoms need to be explicitly
8380                         # added via _add_pkg() so that they are included in the
8381                         # digraph (needed at least for --tree display).
8382                         for arg in args:
8383                                 for atom in arg.set:
8384                                         pkg, existing_node = self._select_package(
8385                                                 arg.root_config.root, atom)
8386                                         if existing_node is None and \
8387                                                 pkg is not None:
8388                                                 if not self._add_pkg(pkg, Dependency(atom=atom,
8389                                                         root=pkg.root, parent=arg)):
8390                                                         return False
8391
8392                         # Allow unsatisfied deps here to avoid showing a masking
8393                         # message for an unsatisfied dep that isn't necessarily
8394                         # masked.
8395                         if not self._create_graph(allow_unsatisfied=True):
8396                                 return False
8397                         if masked_tasks or self._unsatisfied_deps:
8398                                 # This probably means that a required package
8399                                 # was dropped via --skipfirst. It makes the
8400                                 # resume list invalid, so convert it to a
8401                                 # UnsatisfiedResumeDep exception.
8402                                 raise self.UnsatisfiedResumeDep(self,
8403                                         masked_tasks + self._unsatisfied_deps)
8404                         self._serialized_tasks_cache = None
8405                         try:
8406                                 self.altlist()
8407                         except self._unknown_internal_error:
8408                                 return False
8409
8410                 return True
8411
8412         def _load_favorites(self, favorites):
8413                 """
8414                 Use a list of favorites to resume state from a
8415                 previous select_files() call. This creates similar
8416                 DependencyArg instances to those that would have
8417                 been created by the original select_files() call.
8418                 This allows Package instances to be matched with
8419                 DependencyArg instances during graph creation.
8420                 """
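                     # Favorites are plain strings: either set names prefixed with SETPREFIX
                     # (with bare "system"/"world" promoted to sets) or package atoms;
                     # anything else is silently skipped below.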
8421                 root_config = self.roots[self.target_root]
8422                 getSetAtoms = root_config.setconfig.getSetAtoms
8423                 sets = root_config.sets
8424                 args = []
8425                 for x in favorites:
8426                         if not isinstance(x, basestring):
8427                                 continue
8428                         if x in ("system", "world"):
8429                                 x = SETPREFIX + x
8430                         if x.startswith(SETPREFIX):
8431                                 s = x[len(SETPREFIX):]
8432                                 if s not in sets:
8433                                         continue
8434                                 if s in self._sets:
8435                                         continue
8436                                 # Recursively expand sets so that containment tests in
8437                                 # self._get_parent_sets() properly match atoms in nested
8438                                 # sets (like if world contains system).
8439                                 expanded_set = InternalPackageSet(
8440                                         initial_atoms=getSetAtoms(s))
8441                                 self._sets[s] = expanded_set
8442                                 args.append(SetArg(arg=x, set=expanded_set,
8443                                         root_config=root_config))
8444                         else:
8445                                 if not portage.isvalidatom(x):
8446                                         continue
8447                                 args.append(AtomArg(arg=x, atom=x,
8448                                         root_config=root_config))
8449
8450                 # Create the "args" package set from atoms and
8451                 # packages given as arguments.
8452                 args_set = self._sets["args"]
8453                 for arg in args:
8454                         if not isinstance(arg, (AtomArg, PackageArg)):
8455                                 continue
8456                         myatom = arg.atom
8457                         if myatom in args_set:
8458                                 continue
8459                         args_set.add(myatom)
8460                 self._set_atoms.update(chain(*self._sets.itervalues()))
8461                 atom_arg_map = self._atom_arg_map
8462                 for arg in args:
8463                         for atom in arg.set:
8464                                 atom_key = (atom, arg.root_config.root)
8465                                 refs = atom_arg_map.get(atom_key)
8466                                 if refs is None:
8467                                         refs = []
8468                                         atom_arg_map[atom_key] = refs
8469                                 if arg not in refs:
8470                                         refs.append(arg)
8471                 return args
8472
8473         class UnsatisfiedResumeDep(portage.exception.PortageException):
8474                 """
8475                 A dependency of a resume list is not installed. This
8476                 can occur when a required package is dropped from the
8477                 merge list via --skipfirst.
8478                 """
8479                 def __init__(self, depgraph, value):
8480                         portage.exception.PortageException.__init__(self, value)
8481                         self.depgraph = depgraph
8482
8483         class _internal_exception(portage.exception.PortageException):
8484                 def __init__(self, value=""):
8485                         portage.exception.PortageException.__init__(self, value)
8486
8487         class _unknown_internal_error(_internal_exception):
8488                 """
8489                 Used by the depgraph internally to terminate graph creation.
8490                 The specific reason for the failure should have been dumped
8491                 to stderr; unfortunately, the exact reason for the failure
8492                 may not be known.
8493                 """
8494
8495         class _serialize_tasks_retry(_internal_exception):
8496                 """
8497                 This is raised by the _serialize_tasks() method when it needs to
8498                 be called again for some reason. The only case that it's currently
8499                 used for is when neglected dependencies need to be added to the
8500                 graph in order to avoid making a potentially unsafe decision.
8501                 """
8502
8503         class _dep_check_composite_db(portage.dbapi):
8504                 """
8505                 A dbapi-like interface that is optimized for use in dep_check() calls.
8506                 This is built on top of the existing depgraph package selection logic.
8507                 Some packages that have been added to the graph may be masked from this
8508                 view in order to influence the atom preference selection that occurs
8509                 via dep_check().
8510                 """
8511                 def __init__(self, depgraph, root):
8512                         portage.dbapi.__init__(self)
8513                         self._depgraph = depgraph
8514                         self._root = root
8515                         self._match_cache = {}
8516                         self._cpv_pkg_map = {}
8517
8518                 def match(self, atom):
8519                         ret = self._match_cache.get(atom)
8520                         if ret is not None:
8521                                 return ret[:]
8522                         orig_atom = atom
8523                         if "/" not in atom:
8524                                 atom = self._dep_expand(atom)
8525                         pkg, existing = self._depgraph._select_package(self._root, atom)
8526                         if not pkg:
8527                                 ret = []
8528                         else:
8529                                 # Return the highest available from select_package() as well as
8530                                 # any matching slots in the graph db.
8531                                 slots = set()
8532                                 slots.add(pkg.metadata["SLOT"])
8533                                 atom_cp = portage.dep_getkey(atom)
8534                                 if pkg.cp.startswith("virtual/"):
8535                                         # For new-style virtual lookahead that occurs inside
8536                                         # dep_check(), examine all slots. This is needed
8537                                         # so that newer slots will not unnecessarily be pulled in
8538                                         # when a satisfying lower slot is already installed. For
8539                                         # example, if virtual/jdk-1.4 is satisfied via kaffe then
8540                                         # there's no need to pull in a newer slot to satisfy a
8541                                         # virtual/jdk dependency.
8542                                         for db, pkg_type, built, installed, db_keys in \
8543                                                 self._depgraph._filtered_trees[self._root]["dbs"]:
8544                                                 for cpv in db.match(atom):
8545                                                         if portage.cpv_getkey(cpv) != pkg.cp:
8546                                                                 continue
8547                                                         slots.add(db.aux_get(cpv, ["SLOT"])[0])
8548                                 ret = []
8549                                 if self._visible(pkg):
8550                                         self._cpv_pkg_map[pkg.cpv] = pkg
8551                                         ret.append(pkg.cpv)
8552                                 slots.remove(pkg.metadata["SLOT"])
8553                                 while slots:
8554                                         slot_atom = "%s:%s" % (atom_cp, slots.pop())
8555                                         pkg, existing = self._depgraph._select_package(
8556                                                 self._root, slot_atom)
8557                                         if not pkg:
8558                                                 continue
8559                                         if not self._visible(pkg):
8560                                                 continue
8561                                         self._cpv_pkg_map[pkg.cpv] = pkg
8562                                         ret.append(pkg.cpv)
8563                                 if ret:
8564                                         self._cpv_sort_ascending(ret)
8565                         self._match_cache[orig_atom] = ret
8566                         return ret[:]
8567
8568                 def _visible(self, pkg):
8569                         if pkg.installed and "selective" not in self._depgraph.myparams:
8570                                 try:
8571                                         arg = self._depgraph._iter_atoms_for_pkg(pkg).next()
8572                                 except (StopIteration, portage.exception.InvalidDependString):
8573                                         arg = None
8574                                 if arg:
8575                                         return False
8576                         if pkg.installed:
8577                                 try:
8578                                         if not visible(
8579                                                 self._depgraph.pkgsettings[pkg.root], pkg):
8580                                                 return False
8581                                 except portage.exception.InvalidDependString:
8582                                         pass
8583                         return True
8584
8585                 def _dep_expand(self, atom):
8586                         """
8587                         This is only needed for old installed packages that may
8588                         contain atoms that are not fully qualified with a specific
8589                         category. Emulate the cpv_expand() function that's used by
8590                         dbapi.match() in cases like this. If there are multiple
8591                         matches, it's often due to a new-style virtual that has
8592                         been added, so try to filter those out to avoid raising
8593                         a ValueError.
8594                         """
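                             # For example, an unqualified atom like "foo" may expand to
                             # "sys-apps/foo" (illustrative names); when several categories
                             # match, non-virtual candidates are preferred below.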
8595                         root_config = self._depgraph.roots[self._root]
8596                         orig_atom = atom
8597                         expanded_atoms = self._depgraph._dep_expand(root_config, atom)
8598                         if len(expanded_atoms) > 1:
8599                                 non_virtual_atoms = []
8600                                 for x in expanded_atoms:
8601                                         if not portage.dep_getkey(x).startswith("virtual/"):
8602                                                 non_virtual_atoms.append(x)
8603                                 if len(non_virtual_atoms) == 1:
8604                                         expanded_atoms = non_virtual_atoms
8605                         if len(expanded_atoms) > 1:
8606                                 # compatible with portage.cpv_expand()
8607                                 raise portage.exception.AmbiguousPackageName(
8608                                         [portage.dep_getkey(x) for x in expanded_atoms])
8609                         if expanded_atoms:
8610                                 atom = expanded_atoms[0]
8611                         else:
8612                                 null_atom = insert_category_into_atom(atom, "null")
8613                                 null_cp = portage.dep_getkey(null_atom)
8614                                 cat, atom_pn = portage.catsplit(null_cp)
8615                                 virts_p = root_config.settings.get_virts_p().get(atom_pn)
8616                                 if virts_p:
8617                                         # Allow the resolver to choose which virtual.
8618                                         atom = insert_category_into_atom(atom, "virtual")
8619                                 else:
8620                                         atom = insert_category_into_atom(atom, "null")
8621                         return atom
8622
8623                 def aux_get(self, cpv, wants):
8624                         metadata = self._cpv_pkg_map[cpv].metadata
8625                         return [metadata.get(x, "") for x in wants]
8626
8627 class RepoDisplay(object):
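             # Maps repository paths (PORTDIR plus PORTDIR_OVERLAY entries) to small
             # numeric indices for the merge-list output; __str__() renders the
             # "Portage tree and overlays:" legend.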
8628         def __init__(self, roots):
8629                 self._shown_repos = {}
8630                 self._unknown_repo = False
8631                 repo_paths = set()
8632                 for root_config in roots.itervalues():
8633                         portdir = root_config.settings.get("PORTDIR")
8634                         if portdir:
8635                                 repo_paths.add(portdir)
8636                         overlays = root_config.settings.get("PORTDIR_OVERLAY")
8637                         if overlays:
8638                                 repo_paths.update(overlays.split())
8639                 repo_paths = list(repo_paths)
8640                 self._repo_paths = repo_paths
8641                 self._repo_paths_real = [ os.path.realpath(repo_path) \
8642                         for repo_path in repo_paths ]
8643
8644                 # pre-allocate index for PORTDIR so that it always has index 0.
8645                 for root_config in roots.itervalues():
8646                         portdb = root_config.trees["porttree"].dbapi
8647                         portdir = portdb.porttree_root
8648                         if portdir:
8649                                 self.repoStr(portdir)
8650
8651         def repoStr(self, repo_path_real):
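                     # Return the display index for repo_path_real as a string, or "?"
                     # when the repository cannot be determined.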
8652                 real_index = -1
8653                 if repo_path_real and repo_path_real in self._repo_paths_real:
8654                         real_index = self._repo_paths_real.index(repo_path_real)
8655                 if real_index == -1:
8656                         s = "?"
8657                         self._unknown_repo = True
8658                 else:
8659                         shown_repos = self._shown_repos
8660                         repo_paths = self._repo_paths
8661                         repo_path = repo_paths[real_index]
8662                         index = shown_repos.get(repo_path)
8663                         if index is None:
8664                                 index = len(shown_repos)
8665                                 shown_repos[repo_path] = index
8666                         s = str(index)
8667                 return s
8668
8669         def __str__(self):
8670                 output = []
8671                 shown_repos = self._shown_repos
8672                 unknown_repo = self._unknown_repo
8673                 if shown_repos or self._unknown_repo:
8674                         output.append("Portage tree and overlays:\n")
8675                 show_repo_paths = list(shown_repos)
8676                 for repo_path, repo_index in shown_repos.iteritems():
8677                         show_repo_paths[repo_index] = repo_path
8678                 if show_repo_paths:
8679                         for index, repo_path in enumerate(show_repo_paths):
8680                                 output.append(" "+teal("["+str(index)+"]")+" %s\n" % repo_path)
8681                 if unknown_repo:
8682                         output.append(" "+teal("[?]") + \
8683                                 " indicates that the source repository could not be determined\n")
8684                 return "".join(output)
8685
8686 class PackageCounters(object):
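             # Accumulates merge-list statistics (upgrades, downgrades, new installs,
             # reinstalls, uninstalls, blockers, fetch restrictions, download size)
             # and formats the "Total: ..." summary line via __str__().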
8687
8688         def __init__(self):
8689                 self.upgrades   = 0
8690                 self.downgrades = 0
8691                 self.new        = 0
8692                 self.newslot    = 0
8693                 self.reinst     = 0
8694                 self.uninst     = 0
8695                 self.blocks     = 0
8696                 self.blocks_satisfied         = 0
8697                 self.totalsize  = 0
8698                 self.restrict_fetch           = 0
8699                 self.restrict_fetch_satisfied = 0
8700                 self.interactive              = 0
8701
8702         def __str__(self):
8703                 total_installs = self.upgrades + self.downgrades + self.newslot + self.new + self.reinst
8704                 myoutput = []
8705                 details = []
8706                 myoutput.append("Total: %s package" % total_installs)
8707                 if total_installs != 1:
8708                         myoutput.append("s")
8709                 if total_installs != 0:
8710                         myoutput.append(" (")
8711                 if self.upgrades > 0:
8712                         details.append("%s upgrade" % self.upgrades)
8713                         if self.upgrades > 1:
8714                                 details[-1] += "s"
8715                 if self.downgrades > 0:
8716                         details.append("%s downgrade" % self.downgrades)
8717                         if self.downgrades > 1:
8718                                 details[-1] += "s"
8719                 if self.new > 0:
8720                         details.append("%s new" % self.new)
8721                 if self.newslot > 0:
8722                         details.append("%s in new slot" % self.newslot)
8723                         if self.newslot > 1:
8724                                 details[-1] += "s"
8725                 if self.reinst > 0:
8726                         details.append("%s reinstall" % self.reinst)
8727                         if self.reinst > 1:
8728                                 details[-1] += "s"
8729                 if self.uninst > 0:
8730                         details.append("%s uninstall" % self.uninst)
8731                         if self.uninst > 1:
8732                                 details[-1] += "s"
8733                 if self.interactive > 0:
8734                         details.append("%s %s" % (self.interactive,
8735                                 colorize("WARN", "interactive")))
8736                 myoutput.append(", ".join(details))
8737                 if total_installs != 0:
8738                         myoutput.append(")")
8739                 myoutput.append(", Size of downloads: %s" % format_size(self.totalsize))
8740                 if self.restrict_fetch:
8741                         myoutput.append("\nFetch Restriction: %s package" % \
8742                                 self.restrict_fetch)
8743                         if self.restrict_fetch > 1:
8744                                 myoutput.append("s")
8745                 if self.restrict_fetch_satisfied < self.restrict_fetch:
8746                         myoutput.append(bad(" (%s unsatisfied)") % \
8747                                 (self.restrict_fetch - self.restrict_fetch_satisfied))
8748                 if self.blocks > 0:
8749                         myoutput.append("\nConflict: %s block" % \
8750                                 self.blocks)
8751                         if self.blocks > 1:
8752                                 myoutput.append("s")
8753                         if self.blocks_satisfied < self.blocks:
8754                                 myoutput.append(bad(" (%s unsatisfied)") % \
8755                                         (self.blocks - self.blocks_satisfied))
8756                 return "".join(myoutput)
8757
8758 class PollSelectAdapter(PollConstants):
8759
8760         """
8761         Use select to emulate a poll object, for
8762         systems that don't support poll().
8763         """
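             # Minimal usage sketch (mirrors the select.poll() interface; only
             # POLLIN events are ever reported):
             #   poller = PollSelectAdapter()
             #   poller.register(fd)
             #   events = poller.poll(timeout_ms)  # -> [(fd, PollConstants.POLLIN), ...]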
8764
8765         def __init__(self):
8766                 self._registered = {}
8767                 self._select_args = [[], [], []]
8768
8769         def register(self, fd, *args):
8770                 """
8771                 Only POLLIN is currently supported!
8772                 """
8773                 if len(args) > 1:
8774                         raise TypeError(
8775                                 "register expected at most 2 arguments, got " + \
8776                                 repr(1 + len(args)))
8777
8778                 eventmask = PollConstants.POLLIN | \
8779                         PollConstants.POLLPRI | PollConstants.POLLOUT
8780                 if args:
8781                         eventmask = args[0]
8782
8783                 self._registered[fd] = eventmask
8784                 self._select_args = None
8785
8786         def unregister(self, fd):
8787                 self._select_args = None
8788                 del self._registered[fd]
8789
8790         def poll(self, *args):
8791                 if len(args) > 1:
8792                         raise TypeError(
8793                                 "poll expected at most 2 arguments, got " + \
8794                                 repr(1 + len(args)))
8795
8796                 timeout = None
8797                 if args:
8798                         timeout = args[0]
8799
8800                 select_args = self._select_args
8801                 if select_args is None:
8802                         select_args = [self._registered.keys(), [], []]
8803
8804                 if timeout is not None:
8805                         select_args = select_args[:]
8806                         # Translate poll() timeout args to select() timeout args:
8807                         #
8808                         #          | units        | value(s) for indefinite block
8809                         # ---------|--------------|------------------------------
8810                         #   poll   | milliseconds | omitted, negative, or None
8811                         # ---------|--------------|------------------------------
8812                         #   select | seconds      | omitted
8813                         # ---------|--------------|------------------------------
8814
8815                         if timeout is not None and timeout < 0:
8816                                 timeout = None
8817                         if timeout is not None:
8818                                 select_args.append(timeout / 1000.0) # float division: don't truncate sub-second timeouts to 0
8819
8820                 select_events = select.select(*select_args)
8821                 poll_events = []
8822                 for fd in select_events[0]:
8823                         poll_events.append((fd, PollConstants.POLLIN))
8824                 return poll_events
8825
8826 class SequentialTaskQueue(SlotObject):
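             # FIFO of asynchronous tasks: add()/addFront() enqueue work and
             # schedule() starts tasks until max_jobs are running (max_jobs=True
             # means no limit); finished tasks are pruned via their exit listeners.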
8827
8828         __slots__ = ("max_jobs", "running_tasks") + \
8829                 ("_dirty", "_scheduling", "_task_queue")
8830
8831         def __init__(self, **kwargs):
8832                 SlotObject.__init__(self, **kwargs)
8833                 self._task_queue = deque()
8834                 self.running_tasks = set()
8835                 if self.max_jobs is None:
8836                         self.max_jobs = 1
8837                 self._dirty = True
8838
8839         def add(self, task):
8840                 self._task_queue.append(task)
8841                 self._dirty = True
8842
8843         def addFront(self, task):
8844                 self._task_queue.appendleft(task)
8845                 self._dirty = True
8846
8847         def schedule(self):
8848
8849                 if not self._dirty:
8850                         return False
8851
8852                 if not self:
8853                         return False
8854
8855                 if self._scheduling:
8856                         # Ignore any recursive schedule() calls triggered via
8857                         # self._task_exit().
8858                         return False
8859
8860                 self._scheduling = True
8861
8862                 task_queue = self._task_queue
8863                 running_tasks = self.running_tasks
8864                 max_jobs = self.max_jobs
8865                 state_changed = False
8866
8867                 while task_queue and \
8868                         (max_jobs is True or len(running_tasks) < max_jobs):
8869                         task = task_queue.popleft()
8870                         cancelled = getattr(task, "cancelled", None)
8871                         if not cancelled:
8872                                 running_tasks.add(task)
8873                                 task.addExitListener(self._task_exit)
8874                                 task.start()
8875                         state_changed = True
8876
8877                 self._dirty = False
8878                 self._scheduling = False
8879
8880                 return state_changed
8881
8882         def _task_exit(self, task):
8883                 """
8884                 Since we can always rely on exit listeners being called, the set of
8885                 running tasks is always pruned automatically and there is never any need
8886                 to actively prune it.
8887                 """
8888                 self.running_tasks.remove(task)
8889                 if self._task_queue:
8890                         self._dirty = True
8891
8892         def clear(self):
8893                 self._task_queue.clear()
8894                 running_tasks = self.running_tasks
8895                 while running_tasks:
8896                         task = running_tasks.pop()
8897                         task.removeExitListener(self._task_exit)
8898                         task.cancel()
8899                 self._dirty = False
8900
8901         def __nonzero__(self):
8902                 return bool(self._task_queue or self.running_tasks)
8903
8904         def __len__(self):
8905                 return len(self._task_queue) + len(self.running_tasks)
8906
8907 _can_poll_device = None
8908
8909 def can_poll_device():
8910         """
8911         Test if it's possible to use poll() on a device such as a pty. This
8912         is known to fail on Darwin.
8913         @rtype: bool
8914         @returns: True if poll() on a device succeeds, False otherwise.
8915         """
8916
8917         global _can_poll_device
8918         if _can_poll_device is not None:
8919                 return _can_poll_device
8920
8921         if not hasattr(select, "poll"):
8922                 _can_poll_device = False
8923                 return _can_poll_device
8924
8925         try:
8926                 dev_null = open('/dev/null', 'rb')
8927         except IOError:
8928                 _can_poll_device = False
8929                 return _can_poll_device
8930
8931         p = select.poll()
8932         p.register(dev_null.fileno(), PollConstants.POLLIN)
8933
8934         invalid_request = False
8935         for f, event in p.poll():
8936                 if event & PollConstants.POLLNVAL:
8937                         invalid_request = True
8938                         break
8939         dev_null.close()
8940
8941         _can_poll_device = not invalid_request
8942         return _can_poll_device
8943
8944 def create_poll_instance():
8945         """
8946         Create an instance of select.poll, or an instance of
8947         PollSelectAdapter if there is no poll() implementation or
8948         it is broken somehow.
8949         """
8950         if can_poll_device():
8951                 return select.poll()
8952         return PollSelectAdapter()
8953
8954 class PollScheduler(object):
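             # Base class that drives asynchronous jobs from a single poll() loop;
             # subclasses provide _schedule_tasks() and register poll event handlers,
             # while _can_add_job() enforces job-count and load-average limits.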
8955
8956         class _sched_iface_class(SlotObject):
8957                 __slots__ = ("register", "schedule", "unregister")
8958
8959         def __init__(self):
8960                 self._max_jobs = 1
8961                 self._max_load = None
8962                 self._jobs = 0
8963                 self._poll_event_queue = []
8964                 self._poll_event_handlers = {}
8965                 self._poll_event_handler_ids = {}
8966                 # Increment id for each new handler.
8967                 self._event_handler_id = 0
8968                 self._poll_obj = create_poll_instance()
8969                 self._scheduling = False
8970
8971         def _schedule(self):
8972                 """
8973                 Calls _schedule_tasks() and automatically returns early from
8974                 any recursive calls to this method that the _schedule_tasks()
8975                 call might trigger. This makes _schedule() safe to call from
8976                 inside exit listeners.
8977                 """
8978                 if self._scheduling:
8979                         return False
8980                 self._scheduling = True
8981                 try:
8982                         return self._schedule_tasks()
8983                 finally:
8984                         self._scheduling = False
8985
8986         def _running_job_count(self):
8987                 return self._jobs
8988
8989         def _can_add_job(self):
8990                 max_jobs = self._max_jobs
8991                 max_load = self._max_load
8992
8993                 if self._max_jobs is not True and \
8994                         self._running_job_count() >= self._max_jobs:
8995                         return False
8996
8997                 if max_load is not None and \
8998                         (max_jobs is True or max_jobs > 1) and \
8999                         self._running_job_count() >= 1:
9000                         try:
9001                                 avg1, avg5, avg15 = os.getloadavg()
9002                         except (AttributeError, OSError), e:
9003                                 writemsg("!!! getloadavg() failed: %s\n" % (e,),
9004                                         noiselevel=-1)
9005                                 del e
9006                                 return False
9007
9008                         if avg1 >= max_load:
9009                                 return False
9010
9011                 return True
9012
9013         def _poll(self, timeout=None):
9014                 """
9015                 All poll() calls pass through here. The poll events
9016                 are added directly to self._poll_event_queue.
9017                 In order to avoid endless blocking, this raises
9018                 StopIteration if timeout is None and there are
9019                 no file descriptors to poll.
9020                 """
9021                 if not self._poll_event_handlers:
9022                         self._schedule()
9023                         if timeout is None and \
9024                                 not self._poll_event_handlers:
9025                                 raise StopIteration(
9026                                         "timeout is None and there are no poll() event handlers")
9027
9028                 # The following error is known to occur with Linux kernel versions
9029                 # less than 2.6.24:
9030                 #
9031                 #   select.error: (4, 'Interrupted system call')
9032                 #
9033                 # This error has been observed after a SIGSTOP, followed by SIGCONT.
9034                 # Treat it similar to EAGAIN if timeout is None, otherwise just return
9035                 # without any events.
9036                 while True:
9037                         try:
9038                                 self._poll_event_queue.extend(self._poll_obj.poll(timeout))
9039                                 break
9040                         except select.error, e:
9041                                 writemsg_level("\n!!! select error: %s\n" % (e,),
9042                                         level=logging.ERROR, noiselevel=-1)
9043                                 del e
9044                                 if timeout is not None:
9045                                         break
9046
9047         def _next_poll_event(self, timeout=None):
9048                 """
9049                 Since the _schedule_wait() loop is called by event
9050                 handlers from _poll_loop(), maintain a central event
9051                 queue for both of them to share events from a single
9052                 poll() call. In order to avoid endless blocking, this
9053                 raises StopIteration if timeout is None and there are
9054                 no file descriptors to poll.
9055                 """
9056                 if not self._poll_event_queue:
9057                         self._poll(timeout)
9058                 return self._poll_event_queue.pop()
9059
9060         def _poll_loop(self):
9061
9062                 event_handlers = self._poll_event_handlers
9063                 event_handled = False
9064
9065                 try:
9066                         while event_handlers:
9067                                 f, event = self._next_poll_event()
9068                                 handler, reg_id = event_handlers[f]
9069                                 handler(f, event)
9070                                 event_handled = True
9071                 except StopIteration:
9072                         event_handled = True
9073
9074                 if not event_handled:
9075                         raise AssertionError("tight loop")
9076
9077         def _schedule_yield(self):
9078                 """
9079                 Schedule for a short period of time chosen by the scheduler based
9080                 on internal state. Synchronous tasks should call this periodically
9081                 in order to allow the scheduler to service pending poll events. The
9082                 scheduler will call poll() exactly once, without blocking, and any
9083                 resulting poll events will be serviced.
9084                 """
9085                 event_handlers = self._poll_event_handlers
9086                 events_handled = 0
9087
9088                 if not event_handlers:
9089                         return bool(events_handled)
9090
9091                 if not self._poll_event_queue:
9092                         self._poll(0)
9093
9094                 try:
9095                         while event_handlers and self._poll_event_queue:
9096                                 f, event = self._next_poll_event()
9097                                 handler, reg_id = event_handlers[f]
9098                                 handler(f, event)
9099                                 events_handled += 1
9100                 except StopIteration:
9101                         events_handled += 1
9102
9103                 return bool(events_handled)
9104
9105         def _register(self, f, eventmask, handler):
9106                 """
9107                 @rtype: Integer
9108                 @return: A unique registration id, for use in schedule() or
9109                         unregister() calls.
9110                 """
9111                 if f in self._poll_event_handlers:
9112                         raise AssertionError("fd %d is already registered" % f)
9113                 self._event_handler_id += 1
9114                 reg_id = self._event_handler_id
9115                 self._poll_event_handler_ids[reg_id] = f
9116                 self._poll_event_handlers[f] = (handler, reg_id)
9117                 self._poll_obj.register(f, eventmask)
9118                 return reg_id
9119
9120         def _unregister(self, reg_id):
9121                 f = self._poll_event_handler_ids[reg_id]
9122                 self._poll_obj.unregister(f)
9123                 del self._poll_event_handlers[f]
9124                 del self._poll_event_handler_ids[reg_id]
9125
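        # Subclasses expose _register/_schedule_wait/_unregister to tasks
        # through a sched_iface object. Illustrative pattern (not part of the
        # original code): register() returns an id, and schedule(id) services
        # poll events until the handler unregisters that id:
        #
        #     reg_id = sched_iface.register(fd, eventmask, handler)
        #     sched_iface.schedule(reg_id)
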
9126         def _schedule_wait(self, wait_ids):
9127                 """
9128                 Schedule until the given registration ids are no longer
9129                 registered for poll() events.
9130                 @type wait_ids: int or collection of ints
9131                 @param wait_ids: one or more registration ids to wait for
9132                 """
9133                 event_handlers = self._poll_event_handlers
9134                 handler_ids = self._poll_event_handler_ids
9135                 event_handled = False
9136
9137                 if isinstance(wait_ids, int):
9138                         wait_ids = frozenset([wait_ids])
9139
9140                 try:
9141                         while wait_ids.intersection(handler_ids):
9142                                 f, event = self._next_poll_event()
9143                                 handler, reg_id = event_handlers[f]
9144                                 handler(f, event)
9145                                 event_handled = True
9146                 except StopIteration:
9147                         event_handled = True
9148
9149                 return event_handled
9150
9151 class QueueScheduler(PollScheduler):
9152
9153         """
9154         Add instances of SequentialTaskQueue and then call run(). The
9155         run() method returns when no tasks remain.
9156         """
9157
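        # Illustrative usage sketch (not part of the original code); "queue"
        # stands for a SequentialTaskQueue that has already been populated
        # with tasks:
        #
        #     scheduler = QueueScheduler(max_jobs=2)
        #     scheduler.add(queue)
        #     scheduler.run()  # returns once all queued tasks have finished
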
9158         def __init__(self, max_jobs=None, max_load=None):
9159                 PollScheduler.__init__(self)
9160
9161                 if max_jobs is None:
9162                         max_jobs = 1
9163
9164                 self._max_jobs = max_jobs
9165                 self._max_load = max_load
9166                 self.sched_iface = self._sched_iface_class(
9167                         register=self._register,
9168                         schedule=self._schedule_wait,
9169                         unregister=self._unregister)
9170
9171                 self._queues = []
9172                 self._schedule_listeners = []
9173
9174         def add(self, q):
9175                 self._queues.append(q)
9176
9177         def remove(self, q):
9178                 self._queues.remove(q)
9179
9180         def run(self):
9181
9182                 while self._schedule():
9183                         self._poll_loop()
9184
9185                 while self._running_job_count():
9186                         self._poll_loop()
9187
9188         def _schedule_tasks(self):
9189                 """
9190                 @rtype: bool
9191                 @returns: True if there may be remaining tasks to schedule,
9192                         False otherwise.
9193                 """
9194                 while self._can_add_job():
9195                         n = self._max_jobs - self._running_job_count()
9196                         if n < 1:
9197                                 break
9198
9199                         if not self._start_next_job(n):
9200                                 return False
9201
9202                 for q in self._queues:
9203                         if q:
9204                                 return True
9205                 return False
9206
9207         def _running_job_count(self):
9208                 job_count = 0
9209                 for q in self._queues:
9210                         job_count += len(q.running_tasks)
9211                 self._jobs = job_count
9212                 return job_count
9213
9214         def _start_next_job(self, n=1):
9215                 started_count = 0
9216                 for q in self._queues:
9217                         initial_job_count = len(q.running_tasks)
9218                         q.schedule()
9219                         final_job_count = len(q.running_tasks)
9220                         if final_job_count > initial_job_count:
9221                                 started_count += (final_job_count - initial_job_count)
9222                         if started_count >= n:
9223                                 break
9224                 return started_count
9225
9226 class TaskScheduler(object):
9227
9228         """
9229         A simple way to handle scheduling of AsynchronousTask instances. Simply
9230         add tasks and call run(). The run() method returns when no tasks remain.
9231         """
9232
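        # Illustrative usage sketch (not part of the original code); "task"
        # stands for any AsynchronousTask instance, such as a fetcher:
        #
        #     task_scheduler = TaskScheduler(max_jobs=1)
        #     task_scheduler.add(task)
        #     task_scheduler.run()  # blocks until the internal queue is empty
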
9233         def __init__(self, max_jobs=None, max_load=None):
9234                 self._queue = SequentialTaskQueue(max_jobs=max_jobs)
9235                 self._scheduler = QueueScheduler(
9236                         max_jobs=max_jobs, max_load=max_load)
9237                 self.sched_iface = self._scheduler.sched_iface
9238                 self.run = self._scheduler.run
9239                 self._scheduler.add(self._queue)
9240
9241         def add(self, task):
9242                 self._queue.add(task)
9243
9244 class JobStatusDisplay(object):
9245
9246         _bound_properties = ("curval", "failed", "running")
9247         _jobs_column_width = 48
9248
9249         # Don't update the display unless at least this much
9250         # time has passed, in units of seconds.
9251         _min_display_latency = 2
9252
9253         _default_term_codes = {
9254                 'cr'  : '\r',
9255                 'el'  : '\x1b[K',
9256                 'nel' : '\n',
9257         }
9258
9259         _termcap_name_map = {
9260                 'carriage_return' : 'cr',
9261                 'clr_eol'         : 'el',
9262                 'newline'         : 'nel',
9263         }
9264
9265         def __init__(self, out=sys.stdout, quiet=False):
9266                 object.__setattr__(self, "out", out)
9267                 object.__setattr__(self, "quiet", quiet)
9268                 object.__setattr__(self, "maxval", 0)
9269                 object.__setattr__(self, "merges", 0)
9270                 object.__setattr__(self, "_changed", False)
9271                 object.__setattr__(self, "_displayed", False)
9272                 object.__setattr__(self, "_last_display_time", 0)
9273                 object.__setattr__(self, "width", 80)
9274                 self.reset()
9275
9276                 isatty = hasattr(out, "isatty") and out.isatty()
9277                 object.__setattr__(self, "_isatty", isatty)
9278                 if not isatty or not self._init_term():
9279                         term_codes = {}
9280                         for k, capname in self._termcap_name_map.iteritems():
9281                                 term_codes[k] = self._default_term_codes[capname]
9282                         object.__setattr__(self, "_term_codes", term_codes)
9283
9284         def _init_term(self):
9285                 """
9286                 Initialize term control codes.
9287                 @rtype: bool
9288                 @returns: True if term codes were successfully initialized,
9289                         False otherwise.
9290                 """
9291
9292                 term_type = os.environ.get("TERM", "vt100")
9293                 tigetstr = None
9294
9295                 try:
9296                         import curses
9297                         try:
9298                                 curses.setupterm(term_type, self.out.fileno())
9299                                 tigetstr = curses.tigetstr
9300                         except curses.error:
9301                                 pass
9302                 except ImportError:
9303                         pass
9304
9305                 if tigetstr is None:
9306                         return False
9307
9308                 term_codes = {}
9309                 for k, capname in self._termcap_name_map.iteritems():
9310                         code = tigetstr(capname)
9311                         if code is None:
9312                                 code = self._default_term_codes[capname]
9313                         term_codes[k] = code
9314                 object.__setattr__(self, "_term_codes", term_codes)
9315                 return True
9316
9317         def _format_msg(self, msg):
9318                 return ">>> %s" % msg
9319
9320         def _erase(self):
9321                 self.out.write(
9322                         self._term_codes['carriage_return'] + \
9323                         self._term_codes['clr_eol'])
9324                 self.out.flush()
9325                 self._displayed = False
9326
9327         def _display(self, line):
9328                 self.out.write(line)
9329                 self.out.flush()
9330                 self._displayed = True
9331
9332         def _update(self, msg):
9333
9334                 out = self.out
9335                 if not self._isatty:
9336                         out.write(self._format_msg(msg) + self._term_codes['newline'])
9337                         self.out.flush()
9338                         self._displayed = True
9339                         return
9340
9341                 if self._displayed:
9342                         self._erase()
9343
9344                 self._display(self._format_msg(msg))
9345
9346         def displayMessage(self, msg):
9347
9348                 was_displayed = self._displayed
9349
9350                 if self._isatty and self._displayed:
9351                         self._erase()
9352
9353                 self.out.write(self._format_msg(msg) + self._term_codes['newline'])
9354                 self.out.flush()
9355                 self._displayed = False
9356
9357                 if was_displayed:
9358                         self._changed = True
9359                         self.display()
9360
9361         def reset(self):
9362                 self.maxval = 0
9363                 self.merges = 0
9364                 for name in self._bound_properties:
9365                         object.__setattr__(self, name, 0)
9366
9367                 if self._displayed:
9368                         self.out.write(self._term_codes['newline'])
9369                         self.out.flush()
9370                         self._displayed = False
9371
9372         def __setattr__(self, name, value):
9373                 old_value = getattr(self, name)
9374                 if value == old_value:
9375                         return
9376                 object.__setattr__(self, name, value)
9377                 if name in self._bound_properties:
9378                         self._property_change(name, old_value, value)
9379
9380         def _property_change(self, name, old_value, new_value):
9381                 self._changed = True
9382                 self.display()
9383
9384         def _load_avg_str(self):
9385                 try:
9386                         avg = os.getloadavg()
9387                 except (AttributeError, OSError), e:
9388                         return str(e)
9389
9390                 max_avg = max(avg)
9391
9392                 if max_avg < 10:
9393                         digits = 2
9394                 elif max_avg < 100:
9395                         digits = 1
9396                 else:
9397                         digits = 0
9398
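                # Precision shrinks as the load grows, e.g. "0.53, 0.41, 0.30"
                # on a lightly loaded system versus "105, 98, 92" under extreme
                # load, so the resulting string stays roughly the same width.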
9399                 return ", ".join(("%%.%df" % digits) % x for x in avg)
9400
9401         def display(self):
9402                 """
9403                 Display status on stdout, but only if something has
9404                 changed since the last call.
9405                 """
9406
9407                 if self.quiet:
9408                         return
9409
9410                 current_time = time.time()
9411                 time_delta = current_time - self._last_display_time
9412                 if self._displayed and \
9413                         not self._changed:
9414                         if not self._isatty:
9415                                 return
9416                         if time_delta < self._min_display_latency:
9417                                 return
9418
9419                 self._last_display_time = current_time
9420                 self._changed = False
9421                 self._display_status()
9422
9423         def _display_status(self):
9424                 # Don't use len(self._completed_tasks) here since that also
9425                 # can include uninstall tasks.
9426                 curval_str = str(self.curval)
9427                 maxval_str = str(self.maxval)
9428                 running_str = str(self.running)
9429                 failed_str = str(self.failed)
9430                 load_avg_str = self._load_avg_str()
9431
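                # Build the status line twice: a colorized copy for display and
                # a plain copy (via the write listener) that is used below to
                # compute padding and, when necessary, truncation.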
9432                 color_output = StringIO.StringIO()
9433                 plain_output = StringIO.StringIO()
9434                 style_file = portage.output.ConsoleStyleFile(color_output)
9435                 style_file.write_listener = plain_output
9436                 style_writer = portage.output.StyleWriter(file=style_file, maxcol=9999)
9437                 style_writer.style_listener = style_file.new_styles
9438                 f = formatter.AbstractFormatter(style_writer)
9439
9440                 number_style = "INFORM"
9441                 f.add_literal_data("Jobs: ")
9442                 f.push_style(number_style)
9443                 f.add_literal_data(curval_str)
9444                 f.pop_style()
9445                 f.add_literal_data(" of ")
9446                 f.push_style(number_style)
9447                 f.add_literal_data(maxval_str)
9448                 f.pop_style()
9449                 f.add_literal_data(" complete")
9450
9451                 if self.running:
9452                         f.add_literal_data(", ")
9453                         f.push_style(number_style)
9454                         f.add_literal_data(running_str)
9455                         f.pop_style()
9456                         f.add_literal_data(" running")
9457
9458                 if self.failed:
9459                         f.add_literal_data(", ")
9460                         f.push_style(number_style)
9461                         f.add_literal_data(failed_str)
9462                         f.pop_style()
9463                         f.add_literal_data(" failed")
9464
9465                 padding = self._jobs_column_width - len(plain_output.getvalue())
9466                 if padding > 0:
9467                         f.add_literal_data(padding * " ")
9468
9469                 f.add_literal_data("Load avg: ")
9470                 f.add_literal_data(load_avg_str)
9471
9472                 # Truncate to fit width, to avoid making the terminal scroll if the
9473                 # line overflows (happens when the load average is large).
9474                 plain_output = plain_output.getvalue()
9475                 if self._isatty and len(plain_output) > self.width:
9476                         # Use plain_output here since it's easier to truncate
9477                         # properly than the color output which contains console
9478                         # color codes.
9479                         self._update(plain_output[:self.width])
9480                 else:
9481                         self._update(color_output.getvalue())
9482
9483                 xtermTitle(" ".join(plain_output.split()))
9484
9485 class Scheduler(PollScheduler):
9486
9487         _opts_ignore_blockers = \
9488                 frozenset(["--buildpkgonly",
9489                 "--fetchonly", "--fetch-all-uri",
9490                 "--nodeps", "--pretend"])
9491
9492         _opts_no_background = \
9493                 frozenset(["--pretend",
9494                 "--fetchonly", "--fetch-all-uri"])
9495
9496         _opts_no_restart = frozenset(["--buildpkgonly",
9497                 "--fetchonly", "--fetch-all-uri", "--pretend"])
9498
9499         _bad_resume_opts = set(["--ask", "--changelog",
9500                 "--resume", "--skipfirst"])
9501
9502         _fetch_log = "/var/log/emerge-fetch.log"
9503
9504         class _iface_class(SlotObject):
9505                 __slots__ = ("dblinkEbuildPhase", "dblinkDisplayMerge",
9506                         "dblinkElog", "fetch", "register", "schedule",
9507                         "scheduleSetup", "scheduleUnpack", "scheduleYield",
9508                         "unregister")
9509
9510         class _fetch_iface_class(SlotObject):
9511                 __slots__ = ("log_file", "schedule")
9512
9513         _task_queues_class = slot_dict_class(
9514                 ("merge", "jobs", "fetch", "unpack"), prefix="")
9515
9516         class _build_opts_class(SlotObject):
9517                 __slots__ = ("buildpkg", "buildpkgonly",
9518                         "fetch_all_uri", "fetchonly", "pretend")
9519
9520         class _binpkg_opts_class(SlotObject):
9521                 __slots__ = ("fetchonly", "getbinpkg", "pretend")
9522
9523         class _pkg_count_class(SlotObject):
9524                 __slots__ = ("curval", "maxval")
9525
9526         class _emerge_log_class(SlotObject):
9527                 __slots__ = ("xterm_titles",)
9528
9529                 def log(self, *pargs, **kwargs):
9530                         if not self.xterm_titles:
9531                                 # Avoid interference with the scheduler's status display.
9532                                 kwargs.pop("short_msg", None)
9533                         emergelog(self.xterm_titles, *pargs, **kwargs)
9534
9535         class _failed_pkg(SlotObject):
9536                 __slots__ = ("build_dir", "build_log", "pkg", "returncode")
9537
9538         class _ConfigPool(object):
9539                 """Interface for a task to temporarily allocate a config
9540                 instance from a pool. This allows a task to be constructed
9541                 long before the config instance actually becomes needed, like
9542                 when prefetchers are constructed for the whole merge list."""
9543                 __slots__ = ("_root", "_allocate", "_deallocate")
9544                 def __init__(self, root, allocate, deallocate):
9545                         self._root = root
9546                         self._allocate = allocate
9547                         self._deallocate = deallocate
9548                 def allocate(self):
9549                         return self._allocate(self._root)
9550                 def deallocate(self, settings):
9551                         self._deallocate(settings)
9552
9553         class _unknown_internal_error(portage.exception.PortageException):
9554                 """
9555                 Used internally to terminate scheduling. The specific reason for
9556                 the failure should have been dumped to stderr.
9557                 """
9558                 def __init__(self, value=""):
9559                         portage.exception.PortageException.__init__(self, value)
9560
9561         def __init__(self, settings, trees, mtimedb, myopts,
9562                 spinner, mergelist, favorites, digraph):
9563                 PollScheduler.__init__(self)
9564                 self.settings = settings
9565                 self.target_root = settings["ROOT"]
9566                 self.trees = trees
9567                 self.myopts = myopts
9568                 self._spinner = spinner
9569                 self._mtimedb = mtimedb
9570                 self._mergelist = mergelist
9571                 self._favorites = favorites
9572                 self._args_set = InternalPackageSet(favorites)
9573                 self._build_opts = self._build_opts_class()
9574                 for k in self._build_opts.__slots__:
9575                         setattr(self._build_opts, k, "--" + k.replace("_", "-") in myopts)
9576                 self._binpkg_opts = self._binpkg_opts_class()
9577                 for k in self._binpkg_opts.__slots__:
9578                         setattr(self._binpkg_opts, k, "--" + k.replace("_", "-") in myopts)
9579
9580                 self.curval = 0
9581                 self._logger = self._emerge_log_class()
9582                 self._task_queues = self._task_queues_class()
9583                 for k in self._task_queues.allowed_keys:
9584                         setattr(self._task_queues, k,
9585                                 SequentialTaskQueue())
9586                 self._status_display = JobStatusDisplay()
9587                 self._max_load = myopts.get("--load-average")
9588                 max_jobs = myopts.get("--jobs")
9589                 if max_jobs is None:
9590                         max_jobs = 1
9591                 self._set_max_jobs(max_jobs)
9592
9593                 # The root where the currently running
9594                 # portage instance is installed.
9595                 self._running_root = trees["/"]["root_config"]
9596                 self.edebug = 0
9597                 if settings.get("PORTAGE_DEBUG", "") == "1":
9598                         self.edebug = 1
9599                 self.pkgsettings = {}
9600                 self._config_pool = {}
9601                 self._blocker_db = {}
9602                 for root in trees:
9603                         self._config_pool[root] = []
9604                         self._blocker_db[root] = BlockerDB(trees[root]["root_config"])
9605
9606                 fetch_iface = self._fetch_iface_class(log_file=self._fetch_log,
9607                         schedule=self._schedule_fetch)
9608                 self._sched_iface = self._iface_class(
9609                         dblinkEbuildPhase=self._dblink_ebuild_phase,
9610                         dblinkDisplayMerge=self._dblink_display_merge,
9611                         dblinkElog=self._dblink_elog,
9612                         fetch=fetch_iface, register=self._register,
9613                         schedule=self._schedule_wait,
9614                         scheduleSetup=self._schedule_setup,
9615                         scheduleUnpack=self._schedule_unpack,
9616                         scheduleYield=self._schedule_yield,
9617                         unregister=self._unregister)
9618
9619                 self._prefetchers = weakref.WeakValueDictionary()
9620                 self._pkg_queue = []
9621                 self._completed_tasks = set()
9622
9623                 self._failed_pkgs = []
9624                 self._failed_pkgs_all = []
9625                 self._failed_pkgs_die_msgs = []
9626                 self._post_mod_echo_msgs = []
9627                 self._parallel_fetch = False
9628                 merge_count = len([x for x in mergelist \
9629                         if isinstance(x, Package) and x.operation == "merge"])
9630                 self._pkg_count = self._pkg_count_class(
9631                         curval=0, maxval=merge_count)
9632                 self._status_display.maxval = self._pkg_count.maxval
9633
9634                 # The load average takes some time to respond when new
9635                 # jobs are added, so we need to limit the rate of adding
9636                 # new jobs.
9637                 self._job_delay_max = 10
9638                 self._job_delay_factor = 1.0
9639                 self._job_delay_exp = 1.5
9640                 self._previous_job_start_time = None
9641
9642                 self._set_digraph(digraph)
9643
9644                 # This is used to memoize the _choose_pkg() result when
9645                 # no packages can be chosen until one of the existing
9646                 # jobs completes.
9647                 self._choose_pkg_return_early = False
9648
9649                 features = self.settings.features
9650                 if "parallel-fetch" in features and \
9651                         not ("--pretend" in self.myopts or \
9652                         "--fetch-all-uri" in self.myopts or \
9653                         "--fetchonly" in self.myopts):
9654                         if "distlocks" not in features:
9655                                 portage.writemsg(red("!!!")+"\n", noiselevel=-1)
9656                                 portage.writemsg(red("!!!")+" parallel-fetching " + \
9657                                         "requires the distlocks feature enabled"+"\n",
9658                                         noiselevel=-1)
9659                                 portage.writemsg(red("!!!")+" you have it disabled, " + \
9660                                         "thus parallel-fetching is being disabled"+"\n",
9661                                         noiselevel=-1)
9662                                 portage.writemsg(red("!!!")+"\n", noiselevel=-1)
9663                         elif len(mergelist) > 1:
9664                                 self._parallel_fetch = True
9665
9666                 if self._parallel_fetch:
9667                         # clear out existing fetch log if it exists
9668                         try:
9669                                 open(self._fetch_log, 'w')
9670                         except EnvironmentError:
9671                                 pass
9672
9673                 self._running_portage = None
9674                 portage_match = self._running_root.trees["vartree"].dbapi.match(
9675                         portage.const.PORTAGE_PACKAGE_ATOM)
9676                 if portage_match:
9677                         cpv = portage_match.pop()
9678                         self._running_portage = self._pkg(cpv, "installed",
9679                                 self._running_root, installed=True)
9680
9681         def _poll(self, timeout=None):
9682                 self._schedule()
9683                 PollScheduler._poll(self, timeout=timeout)
9684
9685         def _set_max_jobs(self, max_jobs):
9686                 self._max_jobs = max_jobs
9687                 self._task_queues.jobs.max_jobs = max_jobs
9688
9689         def _background_mode(self):
9690                 """
9691                 Check if background mode is enabled and adjust states as necessary.
9692
9693                 @rtype: bool
9694                 @returns: True if background mode is enabled, False otherwise.
9695                 """
9696                 background = (self._max_jobs is True or \
9697                         self._max_jobs > 1 or "--quiet" in self.myopts) and \
9698                         not bool(self._opts_no_background.intersection(self.myopts))
9699
9700                 if background:
9701                         interactive_tasks = self._get_interactive_tasks()
9702                         if interactive_tasks:
9703                                 background = False
9704                                 writemsg_level(">>> Sending package output to stdio due " + \
9705                                         "to interactive package(s):\n",
9706                                         level=logging.INFO, noiselevel=-1)
9707                                 msg = [""]
9708                                 for pkg in interactive_tasks:
9709                                         pkg_str = "  " + colorize("INFORM", str(pkg.cpv))
9710                                         if pkg.root != "/":
9711                                                 pkg_str += " for " + pkg.root
9712                                         msg.append(pkg_str)
9713                                 msg.append("")
9714                                 writemsg_level("".join("%s\n" % (l,) for l in msg),
9715                                         level=logging.INFO, noiselevel=-1)
9716                                 if self._max_jobs is True or self._max_jobs > 1:
9717                                         self._set_max_jobs(1)
9718                                         writemsg_level(">>> Setting --jobs=1 due " + \
9719                                                 "to the above interactive package(s)\n",
9720                                                 level=logging.INFO, noiselevel=-1)
9721
9722                 self._status_display.quiet = \
9723                         not background or \
9724                         ("--quiet" in self.myopts and \
9725                         "--verbose" not in self.myopts)
9726
9727                 self._logger.xterm_titles = \
9728                         "notitles" not in self.settings.features and \
9729                         self._status_display.quiet
9730
9731                 return background
9732
9733         def _get_interactive_tasks(self):
9734                 from portage import flatten
9735                 from portage.dep import use_reduce, paren_reduce
9736                 interactive_tasks = []
9737                 for task in self._mergelist:
9738                         if not (isinstance(task, Package) and \
9739                                 task.operation == "merge"):
9740                                 continue
9741                         try:
9742                                 properties = flatten(use_reduce(paren_reduce(
9743                                         task.metadata["PROPERTIES"]), uselist=task.use.enabled))
9744                         except portage.exception.InvalidDependString, e:
9745                                 show_invalid_depstring_notice(task,
9746                                         task.metadata["PROPERTIES"], str(e))
9747                                 raise self._unknown_internal_error()
9748                         if "interactive" in properties:
9749                                 interactive_tasks.append(task)
9750                 return interactive_tasks
9751
9752         def _set_digraph(self, digraph):
9753                 if "--nodeps" in self.myopts or \
9754                         (self._max_jobs is not True and self._max_jobs < 2):
9755                         # save some memory
9756                         self._digraph = None
9757                         return
9758
9759                 self._digraph = digraph
9760                 self._prune_digraph()
9761
9762         def _prune_digraph(self):
9763                 """
9764                 Prune any root nodes that are irrelevant.
9765                 """
9766
9767                 graph = self._digraph
9768                 completed_tasks = self._completed_tasks
9769                 removed_nodes = set()
9770                 while True:
9771                         for node in graph.root_nodes():
9772                                 if not isinstance(node, Package) or \
9773                                         (node.installed and node.operation == "nomerge") or \
9774                                         node.onlydeps or \
9775                                         node in completed_tasks:
9776                                         removed_nodes.add(node)
9777                         if removed_nodes:
9778                                 graph.difference_update(removed_nodes)
9779                         if not removed_nodes:
9780                                 break
9781                         removed_nodes.clear()
9782
9783         class _pkg_failure(portage.exception.PortageException):
9784                 """
9785                 An instance of this class is raised by unmerge() when
9786                 an uninstallation fails.
9787                 """
9788                 status = 1
9789                 def __init__(self, *pargs):
9790                         portage.exception.PortageException.__init__(self, pargs)
9791                         if pargs:
9792                                 self.status = pargs[0]
9793
9794         def _schedule_fetch(self, fetcher):
9795                 """
9796                 Schedule a fetcher on the fetch queue, in order to
9797                 serialize access to the fetch log.
9798                 """
9799                 self._task_queues.fetch.addFront(fetcher)
9800
9801         def _schedule_setup(self, setup_phase):
9802                 """
9803                 Schedule a setup phase on the merge queue, in order to
9804                 serialize unsandboxed access to the live filesystem.
9805                 """
9806                 self._task_queues.merge.addFront(setup_phase)
9807                 self._schedule()
9808
9809         def _schedule_unpack(self, unpack_phase):
9810                 """
9811                 Schedule an unpack phase on the unpack queue, in order
9812                 to serialize $DISTDIR access for live ebuilds.
9813                 """
9814                 self._task_queues.unpack.add(unpack_phase)
9815
9816         def _find_blockers(self, new_pkg):
9817                 """
9818                 Returns a callable which should be called only when
9819                 the vdb lock has been acquired.
9820                 """
9821                 def get_blockers():
9822                         return self._find_blockers_with_lock(new_pkg, acquire_lock=0)
9823                 return get_blockers
9824
9825         def _find_blockers_with_lock(self, new_pkg, acquire_lock=0):
9826                 if self._opts_ignore_blockers.intersection(self.myopts):
9827                         return None
9828
9829                 # Call gc.collect() here to avoid heap overflow that
9830                 # triggers 'Cannot allocate memory' errors (reported
9831                 # with python-2.5).
9832                 import gc
9833                 gc.collect()
9834
9835                 blocker_db = self._blocker_db[new_pkg.root]
9836
9837                 blocker_dblinks = []
9838                 for blocking_pkg in blocker_db.findInstalledBlockers(
9839                         new_pkg, acquire_lock=acquire_lock):
9840                         if new_pkg.slot_atom == blocking_pkg.slot_atom:
9841                                 continue
9842                         if new_pkg.cpv == blocking_pkg.cpv:
9843                                 continue
9844                         blocker_dblinks.append(portage.dblink(
9845                                 blocking_pkg.category, blocking_pkg.pf, blocking_pkg.root,
9846                                 self.pkgsettings[blocking_pkg.root], treetype="vartree",
9847                                 vartree=self.trees[blocking_pkg.root]["vartree"]))
9848
9849                 gc.collect()
9850
9851                 return blocker_dblinks
9852
9853         def _dblink_pkg(self, pkg_dblink):
9854                 cpv = pkg_dblink.mycpv
9855                 type_name = RootConfig.tree_pkg_map[pkg_dblink.treetype]
9856                 root_config = self.trees[pkg_dblink.myroot]["root_config"]
9857                 installed = type_name == "installed"
9858                 return self._pkg(cpv, type_name, root_config, installed=installed)
9859
9860         def _append_to_log_path(self, log_path, msg):
9861                 f = open(log_path, 'a')
9862                 try:
9863                         f.write(msg)
9864                 finally:
9865                         f.close()
9866
9867         def _dblink_elog(self, pkg_dblink, phase, func, msgs):
9868
9869                 log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
9870                 log_file = None
9871                 out = sys.stdout
9872                 background = self._background
9873
9874                 if background and log_path is not None:
9875                         log_file = open(log_path, 'a')
9876                         out = log_file
9877
9878                 try:
9879                         for msg in msgs:
9880                                 func(msg, phase=phase, key=pkg_dblink.mycpv, out=out)
9881                 finally:
9882                         if log_file is not None:
9883                                 log_file.close()
9884
9885         def _dblink_display_merge(self, pkg_dblink, msg, level=0, noiselevel=0):
9886                 log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
9887                 background = self._background
9888
9889                 if log_path is None:
9890                         if not (background and level < logging.WARN):
9891                                 portage.util.writemsg_level(msg,
9892                                         level=level, noiselevel=noiselevel)
9893                 else:
9894                         if not background:
9895                                 portage.util.writemsg_level(msg,
9896                                         level=level, noiselevel=noiselevel)
9897                         self._append_to_log_path(log_path, msg)
9898
9899         def _dblink_ebuild_phase(self,
9900                 pkg_dblink, pkg_dbapi, ebuild_path, phase):
9901                 """
9902                 Using this callback for merge phases allows the scheduler
9903                 to run while these phases execute asynchronously, and allows
9904                 the scheduler to control output handling.
9905                 """
9906
9907                 scheduler = self._sched_iface
9908                 settings = pkg_dblink.settings
9909                 pkg = self._dblink_pkg(pkg_dblink)
9910                 background = self._background
9911                 log_path = settings.get("PORTAGE_LOG_FILE")
9912
9913                 ebuild_phase = EbuildPhase(background=background,
9914                         pkg=pkg, phase=phase, scheduler=scheduler,
9915                         settings=settings, tree=pkg_dblink.treetype)
9916                 ebuild_phase.start()
9917                 ebuild_phase.wait()
9918
9919                 return ebuild_phase.returncode
9920
9921         def _check_manifests(self):
9922                 # Verify all the manifests now so that the user is notified of failure
9923                 # as soon as possible.
9924                 if "strict" not in self.settings.features or \
9925                         "--fetchonly" in self.myopts or \
9926                         "--fetch-all-uri" in self.myopts:
9927                         return os.EX_OK
9928
9929                 shown_verifying_msg = False
9930                 quiet_settings = {}
9931                 for myroot, pkgsettings in self.pkgsettings.iteritems():
9932                         quiet_config = portage.config(clone=pkgsettings)
9933                         quiet_config["PORTAGE_QUIET"] = "1"
9934                         quiet_config.backup_changes("PORTAGE_QUIET")
9935                         quiet_settings[myroot] = quiet_config
9936                         del quiet_config
9937
9938                 for x in self._mergelist:
9939                         if not isinstance(x, Package) or \
9940                                 x.type_name != "ebuild":
9941                                 continue
9942
9943                         if not shown_verifying_msg:
9944                                 shown_verifying_msg = True
9945                                 self._status_msg("Verifying ebuild manifests")
9946
9947                         root_config = x.root_config
9948                         portdb = root_config.trees["porttree"].dbapi
9949                         quiet_config = quiet_settings[root_config.root]
9950                         quiet_config["O"] = os.path.dirname(portdb.findname(x.cpv))
9951                         if not portage.digestcheck([], quiet_config, strict=True):
9952                                 return 1
9953
9954                 return os.EX_OK
9955
9956         def _add_prefetchers(self):
9957
9958                 if not self._parallel_fetch:
9959                         return
9960
9961                 if self._parallel_fetch:
9962                         self._status_msg("Starting parallel fetch")
9963
9964                         prefetchers = self._prefetchers
9965                         getbinpkg = "--getbinpkg" in self.myopts
9966
9967                         # In order to avoid "waiting for lock" messages
9968                         # at the beginning, which annoy users, never
9969                         # spawn a prefetcher for the first package.
9970                         for pkg in self._mergelist[1:]:
9971                                 prefetcher = self._create_prefetcher(pkg)
9972                                 if prefetcher is not None:
9973                                         self._task_queues.fetch.add(prefetcher)
9974                                         prefetchers[pkg] = prefetcher
9975
9976         def _create_prefetcher(self, pkg):
9977                 """
9978                 @return: a prefetcher, or None if not applicable
9979                 """
9980                 prefetcher = None
9981
9982                 if not isinstance(pkg, Package):
9983                         pass
9984
9985                 elif pkg.type_name == "ebuild":
9986
9987                         prefetcher = EbuildFetcher(background=True,
9988                                 config_pool=self._ConfigPool(pkg.root,
9989                                 self._allocate_config, self._deallocate_config),
9990                                 fetchonly=1, logfile=self._fetch_log,
9991                                 pkg=pkg, prefetch=True, scheduler=self._sched_iface)
9992
9993                 elif pkg.type_name == "binary" and \
9994                         "--getbinpkg" in self.myopts and \
9995                         pkg.root_config.trees["bintree"].isremote(pkg.cpv):
9996
9997                         prefetcher = BinpkgPrefetcher(background=True,
9998                                 pkg=pkg, scheduler=self._sched_iface)
9999
10000                 return prefetcher
10001
10002         def _is_restart_scheduled(self):
10003                 """
10004                 Check if the merge list contains a replacement
10005                 for the currently running instance, which will result
10006                 in a restart after the merge.
10007                 @rtype: bool
10008                 @returns: True if a restart is scheduled, False otherwise.
10009                 """
10010                 if self._opts_no_restart.intersection(self.myopts):
10011                         return False
10012
10013                 mergelist = self._mergelist
10014
10015                 for i, pkg in enumerate(mergelist):
10016                         if self._is_restart_necessary(pkg) and \
10017                                 i != len(mergelist) - 1:
10018                                 return True
10019
10020                 return False
10021
10022         def _is_restart_necessary(self, pkg):
10023                 """
10024                 @return: True if merging the given package
10025                         requires restart, False otherwise.
10026                 """
10027
10028                 # Figure out if we need a restart.
10029                 if pkg.root == self._running_root.root and \
10030                         portage.match_from_list(
10031                         portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
10032                         if self._running_portage:
10033                                 return pkg.cpv != self._running_portage.cpv
10034                         return True
10035                 return False
10036
10037         def _restart_if_necessary(self, pkg):
10038                 """
10039                 Use execv() to restart emerge. This happens
10040                 if portage upgrades itself and there are
10041                 remaining packages in the list.
10042                 """
10043
10044                 if self._opts_no_restart.intersection(self.myopts):
10045                         return
10046
10047                 if not self._is_restart_necessary(pkg):
10048                         return
10049
10050                 if pkg == self._mergelist[-1]:
10051                         return
10052
10053                 self._main_loop_cleanup()
10054
10055                 logger = self._logger
10056                 pkg_count = self._pkg_count
10057                 mtimedb = self._mtimedb
10058                 bad_resume_opts = self._bad_resume_opts
10059
10060                 logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
10061                         (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
10062
10063                 logger.log(" *** RESTARTING " + \
10064                         "emerge via exec() after change of " + \
10065                         "portage version.")
10066
10067                 mtimedb["resume"]["mergelist"].remove(list(pkg))
10068                 mtimedb.commit()
10069                 portage.run_exitfuncs()
10070                 mynewargv = [sys.argv[0], "--resume"]
10071                 resume_opts = self.myopts.copy()
10072                 # For automatic resume, we need to prevent
10073                 # any of bad_resume_opts from leaking in
10074                 # via EMERGE_DEFAULT_OPTS.
10075                 resume_opts["--ignore-default-opts"] = True
10076                 for myopt, myarg in resume_opts.iteritems():
10077                         if myopt not in bad_resume_opts:
10078                                 if myarg is True:
10079                                         mynewargv.append(myopt)
10080                                 else:
10081                                         mynewargv.append(myopt +"="+ str(myarg))
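                # At this point mynewargv might look something like
                # ['/usr/bin/emerge', '--resume', '--ignore-default-opts', '--jobs=2'],
                # depending on which options survive the bad_resume_opts filter.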
10082                 # priority only needs to be adjusted on the first run
10083                 os.environ["PORTAGE_NICENESS"] = "0"
10084                 os.execv(mynewargv[0], mynewargv)
10085
10086         def merge(self):
10087
10088                 if "--resume" in self.myopts:
10089                         # We're resuming.
10090                         portage.writemsg_stdout(
10091                                 colorize("GOOD", "*** Resuming merge...\n"), noiselevel=-1)
10092                         self._logger.log(" *** Resuming merge...")
10093
10094                 self._save_resume_list()
10095
10096                 try:
10097                         self._background = self._background_mode()
10098                 except self._unknown_internal_error:
10099                         return 1
10100
10101                 for root in self.trees:
10102                         root_config = self.trees[root]["root_config"]
10103
10104                         # Even for --pretend --fetch mode, PORTAGE_TMPDIR is required
10105                         # since it might spawn pkg_nofetch which requires PORTAGE_BUILDDIR
10106                         # for ensuring sane $PWD (bug #239560) and storing elog messages.
10107                         tmpdir = root_config.settings.get("PORTAGE_TMPDIR", "")
10108                         if not tmpdir or not os.path.isdir(tmpdir):
10109                                 msg = "The directory specified in your " + \
10110                                         "PORTAGE_TMPDIR variable, '%s', " % tmpdir + \
10111                                         "does not exist. Please create this " + \
10112                                         "directory or correct your PORTAGE_TMPDIR setting."
10113                                 msg = textwrap.wrap(msg, 70)
10114                                 out = portage.output.EOutput()
10115                                 for l in msg:
10116                                         out.eerror(l)
10117                                 return 1
10118
10119                         if self._background:
10120                                 root_config.settings.unlock()
10121                                 root_config.settings["PORTAGE_BACKGROUND"] = "1"
10122                                 root_config.settings.backup_changes("PORTAGE_BACKGROUND")
10123                                 root_config.settings.lock()
10124
10125                         self.pkgsettings[root] = portage.config(
10126                                 clone=root_config.settings)
10127
10128                 rval = self._check_manifests()
10129                 if rval != os.EX_OK:
10130                         return rval
10131
10132                 keep_going = "--keep-going" in self.myopts
10133                 fetchonly = self._build_opts.fetchonly
10134                 mtimedb = self._mtimedb
10135                 failed_pkgs = self._failed_pkgs
10136
10137                 while True:
10138                         rval = self._merge()
10139                         if rval == os.EX_OK or fetchonly or not keep_going:
10140                                 break
10141                         if "resume" not in mtimedb:
10142                                 break
10143                         mergelist = self._mtimedb["resume"].get("mergelist")
10144                         if not mergelist:
10145                                 break
10146
10147                         if not failed_pkgs:
10148                                 break
10149
10150                         for failed_pkg in failed_pkgs:
10151                                 mergelist.remove(list(failed_pkg.pkg))
10152
10153                         self._failed_pkgs_all.extend(failed_pkgs)
10154                         del failed_pkgs[:]
10155
10156                         if not mergelist:
10157                                 break
10158
10159                         if not self._calc_resume_list():
10160                                 break
10161
10162                         clear_caches(self.trees)
10163                         if not self._mergelist:
10164                                 break
10165
10166                         self._save_resume_list()
10167                         self._pkg_count.curval = 0
10168                         self._pkg_count.maxval = len([x for x in self._mergelist \
10169                                 if isinstance(x, Package) and x.operation == "merge"])
10170                         self._status_display.maxval = self._pkg_count.maxval
10171
10172                 self._logger.log(" *** Finished. Cleaning up...")
10173
10174                 if failed_pkgs:
10175                         self._failed_pkgs_all.extend(failed_pkgs)
10176                         del failed_pkgs[:]
10177
10178                 background = self._background
10179                 failure_log_shown = False
10180                 if background and len(self._failed_pkgs_all) == 1:
10181                         # If only one package failed then just show its
10182                         # whole log for easy viewing.
10183                         failed_pkg = self._failed_pkgs_all[-1]
10184                         build_dir = failed_pkg.build_dir
10185                         log_file = None
10186
10187                         log_paths = [failed_pkg.build_log]
10188
10189                         log_path = self._locate_failure_log(failed_pkg)
10190                         if log_path is not None:
10191                                 try:
10192                                         log_file = open(log_path, 'rb')
10193                                 except IOError:
10194                                         pass
10195
10196                         if log_file is not None:
10197                                 try:
10198                                         for line in log_file:
10199                                                 writemsg_level(line, noiselevel=-1)
10200                                 finally:
10201                                         log_file.close()
10202                                 failure_log_shown = True
10203
10204                 # Dump mod_echo output now since it tends to flood the terminal.
10205                 # This keeps more important output, generated later, from being
10206                 # swept away by the mod_echo output.
10207                 mod_echo_output = _flush_elog_mod_echo()
10208
10209                 if background and not failure_log_shown and \
10210                         self._failed_pkgs_all and \
10211                         self._failed_pkgs_die_msgs and \
10212                         not mod_echo_output:
10213
10214                         printer = portage.output.EOutput()
10215                         for mysettings, key, logentries in self._failed_pkgs_die_msgs:
10216                                 root_msg = ""
10217                                 if mysettings["ROOT"] != "/":
10218                                         root_msg = " merged to %s" % mysettings["ROOT"]
10219                                 print
10220                                 printer.einfo("Error messages for package %s%s:" % \
10221                                         (colorize("INFORM", key), root_msg))
10222                                 print
10223                                 for phase in portage.const.EBUILD_PHASES:
10224                                         if phase not in logentries:
10225                                                 continue
10226                                         for msgtype, msgcontent in logentries[phase]:
10227                                                 if isinstance(msgcontent, basestring):
10228                                                         msgcontent = [msgcontent]
10229                                                 for line in msgcontent:
10230                                                         printer.eerror(line.strip("\n"))
10231
10232                 if self._post_mod_echo_msgs:
10233                         for msg in self._post_mod_echo_msgs:
10234                                 msg()
10235
10236                 if len(self._failed_pkgs_all) > 1:
10237                         msg = "The following packages have " + \
10238                                 "failed to build or install:"
10239                         prefix = bad(" * ")
10240                         writemsg(prefix + "\n", noiselevel=-1)
10241                         from textwrap import wrap
10242                         for line in wrap(msg, 72):
10243                                 writemsg("%s%s\n" % (prefix, line), noiselevel=-1)
10244                         writemsg(prefix + "\n", noiselevel=-1)
10245                         for failed_pkg in self._failed_pkgs_all:
10246                                 writemsg("%s\t%s\n" % (prefix,
10247                                         colorize("INFORM", str(failed_pkg.pkg))),
10248                                         noiselevel=-1)
10249                         writemsg(prefix + "\n", noiselevel=-1)
10250
10251                 return rval
10252
10253         def _elog_listener(self, mysettings, key, logentries, fulltext):
10254                 errors = portage.elog.filter_loglevels(logentries, ["ERROR"])
10255                 if errors:
10256                         self._failed_pkgs_die_msgs.append(
10257                                 (mysettings, key, errors))
10258
10259         def _locate_failure_log(self, failed_pkg):
10260
10261                 build_dir = failed_pkg.build_dir
10262                 log_file = None
10263
10264                 log_paths = [failed_pkg.build_log]
10265
10266                 for log_path in log_paths:
10267                         if not log_path:
10268                                 continue
10269
10270                         try:
10271                                 log_size = os.stat(log_path).st_size
10272                         except OSError:
10273                                 continue
10274
10275                         if log_size == 0:
10276                                 continue
10277
10278                         return log_path
10279
10280                 return None
10281
10282         def _add_packages(self):
10283                 pkg_queue = self._pkg_queue
10284                 for pkg in self._mergelist:
10285                         if isinstance(pkg, Package):
10286                                 pkg_queue.append(pkg)
10287                         elif isinstance(pkg, Blocker):
10288                                 pass
10289
10290         def _merge_exit(self, merge):
10291                 self._do_merge_exit(merge)
10292                 self._deallocate_config(merge.merge.settings)
10293                 if merge.returncode == os.EX_OK and \
10294                         not merge.merge.pkg.installed:
10295                         self._status_display.curval += 1
10296                 self._status_display.merges = len(self._task_queues.merge)
10297                 self._schedule()
10298
10299         def _do_merge_exit(self, merge):
10300                 pkg = merge.merge.pkg
10301                 if merge.returncode != os.EX_OK:
10302                         settings = merge.merge.settings
10303                         build_dir = settings.get("PORTAGE_BUILDDIR")
10304                         build_log = settings.get("PORTAGE_LOG_FILE")
10305
10306                         self._failed_pkgs.append(self._failed_pkg(
10307                                 build_dir=build_dir, build_log=build_log,
10308                                 pkg=pkg,
10309                                 returncode=merge.returncode))
10310                         self._failed_pkg_msg(self._failed_pkgs[-1], "install", "to")
10311
10312                         self._status_display.failed = len(self._failed_pkgs)
10313                         return
10314
10315                 self._task_complete(pkg)
10316                 pkg_to_replace = merge.merge.pkg_to_replace
10317                 if pkg_to_replace is not None:
10318                         # When a package is replaced, mark its uninstall
10319                         # task complete (if any).
10320                         uninst_hash_key = \
10321                                 ("installed", pkg.root, pkg_to_replace.cpv, "uninstall")
10322                         self._task_complete(uninst_hash_key)
10323
10324                 if pkg.installed:
10325                         return
10326
10327                 self._restart_if_necessary(pkg)
10328
10329                 # Call mtimedb.commit() after each merge so that
10330                 # --resume still works after being interrupted
10331                 # by reboot, sigkill or similar.
10332                 mtimedb = self._mtimedb
10333                 mtimedb["resume"]["mergelist"].remove(list(pkg))
10334                 if not mtimedb["resume"]["mergelist"]:
10335                         del mtimedb["resume"]
10336                 mtimedb.commit()
10337
10338         def _build_exit(self, build):
10339                 if build.returncode == os.EX_OK:
10340                         self.curval += 1
10341                         merge = PackageMerge(merge=build)
10342                         merge.addExitListener(self._merge_exit)
10343                         self._task_queues.merge.add(merge)
10344                         self._status_display.merges = len(self._task_queues.merge)
10345                 else:
10346                         settings = build.settings
10347                         build_dir = settings.get("PORTAGE_BUILDDIR")
10348                         build_log = settings.get("PORTAGE_LOG_FILE")
10349
10350                         self._failed_pkgs.append(self._failed_pkg(
10351                                 build_dir=build_dir, build_log=build_log,
10352                                 pkg=build.pkg,
10353                                 returncode=build.returncode))
10354                         self._failed_pkg_msg(self._failed_pkgs[-1], "emerge", "for")
10355
10356                         self._status_display.failed = len(self._failed_pkgs)
10357                         self._deallocate_config(build.settings)
10358                 self._jobs -= 1
10359                 self._status_display.running = self._jobs
10360                 self._schedule()
10361
10362         def _extract_exit(self, build):
10363                 self._build_exit(build)
10364
10365         def _task_complete(self, pkg):
10366                 self._completed_tasks.add(pkg)
10367                 self._choose_pkg_return_early = False
10368
10369         def _merge(self):
10370
10371                 self._add_prefetchers()
10372                 self._add_packages()
10373                 pkg_queue = self._pkg_queue
10374                 failed_pkgs = self._failed_pkgs
10375                 portage.locks._quiet = self._background
10376                 portage.elog._emerge_elog_listener = self._elog_listener
10377                 rval = os.EX_OK
10378
10379                 try:
10380                         self._main_loop()
10381                 finally:
10382                         self._main_loop_cleanup()
10383                         portage.locks._quiet = False
10384                         portage.elog._emerge_elog_listener = None
10385                         if failed_pkgs:
10386                                 rval = failed_pkgs[-1].returncode
10387
10388                 return rval
10389
10390         def _main_loop_cleanup(self):
10391                 del self._pkg_queue[:]
10392                 self._completed_tasks.clear()
10393                 self._choose_pkg_return_early = False
10394                 self._status_display.reset()
10395                 self._digraph = None
10396                 self._task_queues.fetch.clear()
10397
10398         def _choose_pkg(self):
10399                 """
10400                 Choose a task that has all its dependencies satisfied.
10401                 """
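                      # Selection strategy: when a dependency graph is available, a
                      # package is eligible only if none of its deep dependencies are
                      # still scheduled for merging (see _dependent_on_scheduled_merges
                      # below); otherwise we wait for a running job to complete first.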
10402
10403                 if self._choose_pkg_return_early:
10404                         return None
10405
10406                 if self._digraph is None:
10407                         if (self._jobs or self._task_queues.merge) and \
10408                                 not ("--nodeps" in self.myopts and \
10409                                 (self._max_jobs is True or self._max_jobs > 1)):
10410                                 self._choose_pkg_return_early = True
10411                                 return None
10412                         return self._pkg_queue.pop(0)
10413
10414                 if not (self._jobs or self._task_queues.merge):
10415                         return self._pkg_queue.pop(0)
10416
10417                 self._prune_digraph()
10418
10419                 chosen_pkg = None
10420                 later = set(self._pkg_queue)
10421                 for pkg in self._pkg_queue:
10422                         later.remove(pkg)
10423                         if not self._dependent_on_scheduled_merges(pkg, later):
10424                                 chosen_pkg = pkg
10425                                 break
10426
10427                 if chosen_pkg is not None:
10428                         self._pkg_queue.remove(chosen_pkg)
10429
10430                 if chosen_pkg is None:
10431                         # There's no point in searching for a package to
10432                         # choose until at least one of the existing jobs
10433                         # completes.
10434                         self._choose_pkg_return_early = True
10435
10436                 return chosen_pkg
10437
10438         def _dependent_on_scheduled_merges(self, pkg, later):
10439                 """
10440                 Traverse the subgraph of the given package's deep dependencies
10441                 to see if it contains any scheduled merges.
10442                 @param pkg: a package to check dependencies for
10443                 @type pkg: Package
10444                 @param later: packages for which dependence should be ignored
10445                         since they will be merged later than pkg anyway and therefore
10446                         delaying the merge of pkg will not result in a more optimal
10447                         merge order
10448                 @type later: set
10449                 @rtype: bool
10450                 @returns: True if the package is dependent, False otherwise.
10451                 """
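                      # This is an iterative depth-first traversal of the scheduler
                      # digraph rooted at pkg, stopping early as soon as it reaches a
                      # node that still has a merge outstanding (i.e. one that is not an
                      # installed "nomerge" node, not an indirect uninstall, not already
                      # completed, and not queued to merge later than pkg anyway).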
10452
10453                 graph = self._digraph
10454                 completed_tasks = self._completed_tasks
10455
10456                 dependent = False
10457                 traversed_nodes = set([pkg])
10458                 direct_deps = graph.child_nodes(pkg)
10459                 node_stack = direct_deps
10460                 direct_deps = frozenset(direct_deps)
10461                 while node_stack:
10462                         node = node_stack.pop()
10463                         if node in traversed_nodes:
10464                                 continue
10465                         traversed_nodes.add(node)
10466                         if not ((node.installed and node.operation == "nomerge") or \
10467                                 (node.operation == "uninstall" and \
10468                                 node not in direct_deps) or \
10469                                 node in completed_tasks or \
10470                                 node in later):
10471                                 dependent = True
10472                                 break
10473                         node_stack.extend(graph.child_nodes(node))
10474
10475                 return dependent
10476
10477         def _allocate_config(self, root):
10478                 """
10479                 Allocate a unique config instance for a task in order
10480                 to prevent interference between parallel tasks.
10481                 """
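                      # Reusing settings instances from self._config_pool avoids the
                      # cost of cloning a full portage.config object for every task.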
10482                 if self._config_pool[root]:
10483                         temp_settings = self._config_pool[root].pop()
10484                 else:
10485                         temp_settings = portage.config(clone=self.pkgsettings[root])
10486                 # Since config.setcpv() isn't guaranteed to call config.reset() for
10487                 # performance reasons, call it here to make sure all settings from the
10488                 # previous package get flushed out (such as PORTAGE_LOG_FILE).
10489                 temp_settings.reload()
10490                 temp_settings.reset()
10491                 return temp_settings
10492
10493         def _deallocate_config(self, settings):
10494                 self._config_pool[settings["ROOT"]].append(settings)
10495
10496         def _main_loop(self):
10497
10498                 # Only allow 1 job max if a restart is scheduled
10499                 # due to a portage update.
10500                 if self._is_restart_scheduled() or \
10501                         self._opts_no_background.intersection(self.myopts):
10502                         self._set_max_jobs(1)
10503
10504                 merge_queue = self._task_queues.merge
10505
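                      # The first loop drains the schedule while new jobs can still be
                      # started; the second keeps polling until all running jobs and
                      # queued merges have finished.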
10506                 while self._schedule():
10507                         if self._poll_event_handlers:
10508                                 self._poll_loop()
10509
10510                 while True:
10511                         self._schedule()
10512                         if not (self._jobs or merge_queue):
10513                                 break
10514                         if self._poll_event_handlers:
10515                                 self._poll_loop()
10516
10517         def _keep_scheduling(self):
10518                 return bool(self._pkg_queue and \
10519                         not (self._failed_pkgs and not self._build_opts.fetchonly))
10520
10521         def _schedule_tasks(self):
10522                 self._schedule_tasks_imp()
10523                 self._status_display.display()
10524
10525                 state_change = 0
10526                 for q in self._task_queues.values():
10527                         if q.schedule():
10528                                 state_change += 1
10529
10530                 # Cancel prefetchers if they're the only reason
10531                 # the main poll loop is still running.
10532                 if self._failed_pkgs and not self._build_opts.fetchonly and \
10533                         not (self._jobs or self._task_queues.merge) and \
10534                         self._task_queues.fetch:
10535                         self._task_queues.fetch.clear()
10536                         state_change += 1
10537
10538                 if state_change:
10539                         self._schedule_tasks_imp()
10540                         self._status_display.display()
10541
10542                 return self._keep_scheduling()
10543
10544         def _job_delay(self):
10545                 """
10546                 @rtype: bool
10547                 @returns: True if job scheduling should be delayed, False otherwise.
10548                 """
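                      # Illustration (the actual numbers depend on the configured
                      # attributes): if _job_delay_factor were 0.1 and _job_delay_exp
                      # were 1.5, then with 4 running jobs the minimum spacing between
                      # job starts would be 0.1 * 4 ** 1.5 = 0.8 seconds, capped at
                      # _job_delay_max.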
10549
10550                 if self._jobs and self._max_load is not None:
10551
10552                         current_time = time.time()
10553
10554                         delay = self._job_delay_factor * self._jobs ** self._job_delay_exp
10555                         if delay > self._job_delay_max:
10556                                 delay = self._job_delay_max
10557                         if (current_time - self._previous_job_start_time) < delay:
10558                                 return True
10559
10560                 return False
10561
10562         def _schedule_tasks_imp(self):
10563                 """
10564                 @rtype: bool
10565                 @returns: True if state changed, False otherwise.
10566                 """
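                      # Dispatch summary: already-installed packages go straight to the
                      # merge queue, built packages go to the job queue with
                      # _extract_exit as the exit listener, and everything else is built
                      # from source with _build_exit as the exit listener.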
10567
10568                 state_change = 0
10569
10570                 while True:
10571
10572                         if not self._keep_scheduling():
10573                                 return bool(state_change)
10574
10575                         if self._choose_pkg_return_early or \
10576                                 not self._can_add_job() or \
10577                                 self._job_delay():
10578                                 return bool(state_change)
10579
10580                         pkg = self._choose_pkg()
10581                         if pkg is None:
10582                                 return bool(state_change)
10583
10584                         state_change += 1
10585
10586                         if not pkg.installed:
10587                                 self._pkg_count.curval += 1
10588
10589                         task = self._task(pkg)
10590
10591                         if pkg.installed:
10592                                 merge = PackageMerge(merge=task)
10593                                 merge.addExitListener(self._merge_exit)
10594                                 self._task_queues.merge.add(merge)
10595
10596                         elif pkg.built:
10597                                 self._jobs += 1
10598                                 self._previous_job_start_time = time.time()
10599                                 self._status_display.running = self._jobs
10600                                 task.addExitListener(self._extract_exit)
10601                                 self._task_queues.jobs.add(task)
10602
10603                         else:
10604                                 self._jobs += 1
10605                                 self._previous_job_start_time = time.time()
10606                                 self._status_display.running = self._jobs
10607                                 task.addExitListener(self._build_exit)
10608                                 self._task_queues.jobs.add(task)
10609
10610                 return bool(state_change)
10611
10612         def _task(self, pkg):
10613
10614                 pkg_to_replace = None
10615                 if pkg.operation != "uninstall":
10616                         vardb = pkg.root_config.trees["vartree"].dbapi
10617                         previous_cpv = vardb.match(pkg.slot_atom)
10618                         if previous_cpv:
10619                                 previous_cpv = previous_cpv.pop()
10620                                 pkg_to_replace = self._pkg(previous_cpv,
10621                                         "installed", pkg.root_config, installed=True)
10622
10623                 task = MergeListItem(args_set=self._args_set,
10624                         background=self._background, binpkg_opts=self._binpkg_opts,
10625                         build_opts=self._build_opts,
10626                         config_pool=self._ConfigPool(pkg.root,
10627                         self._allocate_config, self._deallocate_config),
10628                         emerge_opts=self.myopts,
10629                         find_blockers=self._find_blockers(pkg), logger=self._logger,
10630                         mtimedb=self._mtimedb, pkg=pkg, pkg_count=self._pkg_count.copy(),
10631                         pkg_to_replace=pkg_to_replace,
10632                         prefetcher=self._prefetchers.get(pkg),
10633                         scheduler=self._sched_iface,
10634                         settings=self._allocate_config(pkg.root),
10635                         statusMessage=self._status_msg,
10636                         world_atom=self._world_atom)
10637
10638                 return task
10639
10640         def _failed_pkg_msg(self, failed_pkg, action, preposition):
10641                 pkg = failed_pkg.pkg
10642                 msg = "%s to %s %s" % \
10643                         (bad("Failed"), action, colorize("INFORM", pkg.cpv))
10644                 if pkg.root != "/":
10645                         msg += " %s %s" % (preposition, pkg.root)
10646
10647                 log_path = self._locate_failure_log(failed_pkg)
10648                 if log_path is not None:
10649                         msg += ", Log file:"
10650                 self._status_msg(msg)
10651
10652                 if log_path is not None:
10653                         self._status_msg(" '%s'" % (colorize("INFORM", log_path),))
10654
10655         def _status_msg(self, msg):
10656                 """
10657                 Display a brief status message (no newlines) in the status display.
10658                 This is called by tasks to provide feedback to the user. It
10659                 delegates the responsibility of generating \r and \n control
10660                 characters to the status display, so that lines are created or
10661                 erased when necessary and appropriate.
10662
10663                 @type msg: str
10664                 @param msg: a brief status message (no newlines allowed)
10665                 """
10666                 if not self._background:
10667                         writemsg_level("\n")
10668                 self._status_display.displayMessage(msg)
10669
10670         def _save_resume_list(self):
10671                 """
10672                 Do this before verifying the ebuild Manifests since it might
10673                 be possible for the user to use --resume --skipfirst to get past
10674                 a non-essential package with a broken digest.
10675                 """
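                      # Only Package nodes with a pending "merge" operation are saved;
                      # blockers and uninstall tasks are not recorded in the resume list.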
10676                 mtimedb = self._mtimedb
10677                 mtimedb["resume"]["mergelist"] = [list(x) \
10678                         for x in self._mergelist \
10679                         if isinstance(x, Package) and x.operation == "merge"]
10680
10681                 mtimedb.commit()
10682
10683         def _calc_resume_list(self):
10684                 """
10685                 Use the current resume list to calculate a new one,
10686                 dropping any packages with unsatisfied deps.
10687                 @rtype: bool
10688                 @returns: True if successful, False otherwise.
10689                 """
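                      # Rough outline: rebuild the dependency graph from the saved
                      # resume list with skip_unsatisfied=True, report any packages that
                      # had to be dropped, and record those drops as failures so that
                      # --keep-going accounting stays correct.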
10690                 print colorize("GOOD", "*** Resuming merge...")
10691
10692                 if self._show_list():
10693                         if "--tree" in self.myopts:
10694                                 portage.writemsg_stdout("\n" + \
10695                                         darkgreen("These are the packages that " + \
10696                                         "would be merged, in reverse order:\n\n"))
10697
10698                         else:
10699                                 portage.writemsg_stdout("\n" + \
10700                                         darkgreen("These are the packages that " + \
10701                                         "would be merged, in order:\n\n"))
10702
10703                 show_spinner = "--quiet" not in self.myopts and \
10704                         "--nodeps" not in self.myopts
10705
10706                 if show_spinner:
10707                         print "Calculating dependencies  ",
10708
10709                 myparams = create_depgraph_params(self.myopts, None)
10710                 success = False
10711                 e = None
10712                 try:
10713                         success, mydepgraph, dropped_tasks = resume_depgraph(
10714                                 self.settings, self.trees, self._mtimedb, self.myopts,
10715                                 myparams, self._spinner, skip_unsatisfied=True)
10716                 except depgraph.UnsatisfiedResumeDep, e:
10717                         mydepgraph = e.depgraph
10718                         dropped_tasks = set()
10719
10720                 if show_spinner:
10721                         print "\b\b... done!"
10722
10723                 if e is not None:
10724                         def unsatisfied_resume_dep_msg():
10725                                 mydepgraph.display_problems()
10726                                 out = portage.output.EOutput()
10727                                 out.eerror("One or more packages are either masked or " + \
10728                                         "have missing dependencies:")
10729                                 out.eerror("")
10730                                 indent = "  "
10731                                 show_parents = set()
10732                                 for dep in e.value:
10733                                         if dep.parent in show_parents:
10734                                                 continue
10735                                         show_parents.add(dep.parent)
10736                                         if dep.atom is None:
10737                                                 out.eerror(indent + "Masked package:")
10738                                                 out.eerror(2 * indent + str(dep.parent))
10739                                                 out.eerror("")
10740                                         else:
10741                                                 out.eerror(indent + str(dep.atom) + " pulled in by:")
10742                                                 out.eerror(2 * indent + str(dep.parent))
10743                                                 out.eerror("")
10744                                 msg = "The resume list contains packages " + \
10745                                         "that are either masked or have " + \
10746                                         "unsatisfied dependencies. " + \
10747                                         "Please restart/continue " + \
10748                                         "the operation manually, or use --skipfirst " + \
10749                                         "to skip the first package in the list and " + \
10750                                         "any other packages that may be " + \
10751                                         "masked or have missing dependencies."
10752                                 for line in textwrap.wrap(msg, 72):
10753                                         out.eerror(line)
10754                         self._post_mod_echo_msgs.append(unsatisfied_resume_dep_msg)
10755                         return False
10756
10757                 if success and self._show_list():
10758                         mylist = mydepgraph.altlist()
10759                         if mylist:
10760                                 if "--tree" in self.myopts:
10761                                         mylist.reverse()
10762                                 mydepgraph.display(mylist, favorites=self._favorites)
10763
10764                 if not success:
10765                         self._post_mod_echo_msgs.append(mydepgraph.display_problems)
10766                         return False
10767                 mydepgraph.display_problems()
10768
10769                 mylist = mydepgraph.altlist()
10770                 mydepgraph.break_refs(mylist)
10771                 mydepgraph.break_refs(dropped_tasks)
10772                 self._mergelist = mylist
10773                 self._set_digraph(mydepgraph.schedulerGraph())
10774
10775                 msg_width = 75
10776                 for task in dropped_tasks:
10777                         if not (isinstance(task, Package) and task.operation == "merge"):
10778                                 continue
10779                         pkg = task
10780                         msg = "emerge --keep-going:" + \
10781                                 " %s" % (pkg.cpv,)
10782                         if pkg.root != "/":
10783                                 msg += " for %s" % (pkg.root,)
10784                         msg += " dropped due to unsatisfied dependency."
10785                         for line in textwrap.wrap(msg, msg_width):
10786                                 eerror(line, phase="other", key=pkg.cpv)
10787                         settings = self.pkgsettings[pkg.root]
10788                         # Ensure that log collection from $T is disabled inside
10789                         # elog_process(), since any logs that might exist are
10790                         # not valid here.
10791                         settings.pop("T", None)
10792                         portage.elog.elog_process(pkg.cpv, settings)
10793                         self._failed_pkgs_all.append(self._failed_pkg(pkg=pkg))
10794
10795                 return True
10796
10797         def _show_list(self):
10798                 myopts = self.myopts
10799                 if "--quiet" not in myopts and \
10800                         ("--ask" in myopts or "--tree" in myopts or \
10801                         "--verbose" in myopts):
10802                         return True
10803                 return False
10804
10805         def _world_atom(self, pkg):
10806                 """
10807                 Add the package to the world file, but only if
10808                 it's supposed to be added. Otherwise, do nothing.
10809                 """
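                      # The world set is locked (when the set supports locking) and
                      # reloaded before the atom is added, so that updates made by
                      # concurrent processes are not clobbered.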
10810
10811                 if set(("--buildpkgonly", "--fetchonly",
10812                         "--fetch-all-uri",
10813                         "--oneshot", "--onlydeps",
10814                         "--pretend")).intersection(self.myopts):
10815                         return
10816
10817                 if pkg.root != self.target_root:
10818                         return
10819
10820                 args_set = self._args_set
10821                 if not args_set.findAtomForPackage(pkg):
10822                         return
10823
10824                 logger = self._logger
10825                 pkg_count = self._pkg_count
10826                 root_config = pkg.root_config
10827                 world_set = root_config.sets["world"]
10828                 world_locked = False
10829                 if hasattr(world_set, "lock"):
10830                         world_set.lock()
10831                         world_locked = True
10832
10833                 try:
10834                         if hasattr(world_set, "load"):
10835                                 world_set.load() # maybe it's changed on disk
10836
10837                         atom = create_world_atom(pkg, args_set, root_config)
10838                         if atom:
10839                                 if hasattr(world_set, "add"):
10840                                         self._status_msg(('Recording %s in "world" ' + \
10841                                                 'favorites file...') % atom)
10842                                         logger.log(" === (%s of %s) Updating world file (%s)" % \
10843                                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv))
10844                                         world_set.add(atom)
10845                                 else:
10846                                         writemsg_level('\n!!! Unable to record %s in "world"\n' % \
10847                                                 (atom,), level=logging.WARN, noiselevel=-1)
10848                 finally:
10849                         if world_locked:
10850                                 world_set.unlock()
10851
10852         def _pkg(self, cpv, type_name, root_config, installed=False):
10853                 """
10854                 Get a package instance from the cache, or create a new
10855                 one if necessary. Raises KeyError from aux_get if it
10856                 fails for some reason (package does not exist or is
10857                 corrupt).
10858                 """
10859                 operation = "merge"
10860                 if installed:
10861                         operation = "nomerge"
10862
10863                 if self._digraph is not None:
10864                         # Reuse existing instance when available.
10865                         pkg = self._digraph.get(
10866                                 (type_name, root_config.root, cpv, operation))
10867                         if pkg is not None:
10868                                 return pkg
10869
10870                 tree_type = depgraph.pkg_tree_map[type_name]
10871                 db = root_config.trees[tree_type].dbapi
10872                 db_keys = list(self.trees[root_config.root][
10873                         tree_type].dbapi._aux_cache_keys)
10874                 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
10875                 pkg = Package(cpv=cpv, metadata=metadata,
10876                         root_config=root_config, installed=installed)
10877                 if type_name == "ebuild":
10878                         settings = self.pkgsettings[root_config.root]
10879                         settings.setcpv(pkg)
10880                         pkg.metadata["USE"] = settings["PORTAGE_USE"]
10881
10882                 return pkg
10883
10884 class MetadataRegen(PollScheduler):
10885
10886         def __init__(self, portdb, max_jobs=None, max_load=None):
10887                 PollScheduler.__init__(self)
10888                 self._portdb = portdb
10889
10890                 if max_jobs is None:
10891                         max_jobs = 1
10892
10893                 self._max_jobs = max_jobs
10894                 self._max_load = max_load
10895                 self._sched_iface = self._sched_iface_class(
10896                         register=self._register,
10897                         schedule=self._schedule_wait,
10898                         unregister=self._unregister)
10899
10900                 self._valid_pkgs = set()
10901                 self._process_iter = self._iter_metadata_processes()
10902
10903         def _iter_metadata_processes(self):
10904                 portdb = self._portdb
10905                 valid_pkgs = self._valid_pkgs
10906                 every_cp = portdb.cp_all()
10907                 every_cp.sort(reverse=True)
10908
10909                 while every_cp:
10910                         cp = every_cp.pop()
10911                         portage.writemsg_stdout("Processing %s\n" % cp)
10912                         cpv_list = portdb.cp_list(cp)
10913                         for cpv in cpv_list:
10914                                 valid_pkgs.add(cpv)
10915                                 ebuild_path, repo_path = portdb.findname2(cpv)
10916                                 metadata_process = portdb._metadata_process(
10917                                         cpv, ebuild_path, repo_path)
10918                                 if metadata_process is None:
10919                                         continue
10920                                 yield metadata_process
10921
10922         def run(self):
10923
10924                 portdb = self._portdb
10925                 from portage.cache.cache_errors import CacheError
10926                 dead_nodes = {}
10927
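                      # dead_nodes maps each porttree to the set of cache keys currently
                      # in its auxdb; keys that still have a matching ebuild are
                      # discarded from these sets below, and whatever remains is purged
                      # as stale after regeneration completes.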
10928                 for mytree in portdb.porttrees:
10929                         try:
10930                                 dead_nodes[mytree] = set(portdb.auxdb[mytree].iterkeys())
10931                         except CacheError, e:
10932                                 portage.writemsg("Error listing cache entries for " + \
10933                                         "'%s': %s, continuing...\n" % (mytree, e), noiselevel=-1)
10934                                 del e
10935                                 dead_nodes = None
10936                                 break
10937
10938                 while self._schedule():
10939                         self._poll_loop()
10940
10941                 while self._jobs:
10942                         self._poll_loop()
10943
10944                 if dead_nodes:
10945                         for y in self._valid_pkgs:
10946                                 for mytree in portdb.porttrees:
10947                                         if portdb.findname2(y, mytree=mytree)[0]:
10948                                                 dead_nodes[mytree].discard(y)
10949
10950                         for mytree, nodes in dead_nodes.iteritems():
10951                                 auxdb = portdb.auxdb[mytree]
10952                                 for y in nodes:
10953                                         try:
10954                                                 del auxdb[y]
10955                                         except (KeyError, CacheError):
10956                                                 pass
10957
10958         def _schedule_tasks(self):
10959                 """
10960                 @rtype: bool
10961                 @returns: True if there may be remaining tasks to schedule,
10962                         False otherwise.
10963                 """
10964                 while self._can_add_job():
10965                         try:
10966                                 metadata_process = self._process_iter.next()
10967                         except StopIteration:
10968                                 return False
10969
10970                         self._jobs += 1
10971                         metadata_process.scheduler = self._sched_iface
10972                         metadata_process.addExitListener(self._metadata_exit)
10973                         metadata_process.start()
10974                 return True
10975
10976         def _metadata_exit(self, metadata_process):
10977                 self._jobs -= 1
10978                 if metadata_process.returncode != os.EX_OK:
10979                         self._valid_pkgs.discard(metadata_process.cpv)
10980                         portage.writemsg("Error processing %s, continuing...\n" % \
10981                                 (metadata_process.cpv,))
10982                 self._schedule()
10983
10984 class UninstallFailure(portage.exception.PortageException):
10985         """
10986         An instance of this class is raised by unmerge() when
10987         an uninstallation fails.
10988         """
10989         status = 1
10990         def __init__(self, *pargs):
10991                 portage.exception.PortageException.__init__(self, pargs)
10992                 if pargs:
10993                         self.status = pargs[0]
10994
10995 def unmerge(root_config, myopts, unmerge_action,
10996         unmerge_files, ldpath_mtimes, autoclean=0,
10997         clean_world=1, clean_delay=1, ordered=0, raise_on_error=0,
10998         scheduler=None, writemsg_level=portage.util.writemsg_level):
10999
11000         quiet = "--quiet" in myopts
11001         settings = root_config.settings
11002         sets = root_config.sets
11003         vartree = root_config.trees["vartree"]
11004         candidate_catpkgs=[]
11005         global_unmerge=0
11006         xterm_titles = "notitles" not in settings.features
11007         out = portage.output.EOutput()
11008         pkg_cache = {}
11009         db_keys = list(vartree.dbapi._aux_cache_keys)
11010
11011         def _pkg(cpv):
11012                 pkg = pkg_cache.get(cpv)
11013                 if pkg is None:
11014                         pkg = Package(cpv=cpv, installed=True,
11015                                 metadata=izip(db_keys, vartree.dbapi.aux_get(cpv, db_keys)),
11016                                 root_config=root_config,
11017                                 type_name="installed")
11018                         pkg_cache[cpv] = pkg
11019                 return pkg
11020
11021         vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
11022         try:
11023                 # At least the parent needs to exist for the lock file.
11024                 portage.util.ensure_dirs(vdb_path)
11025         except portage.exception.PortageException:
11026                 pass
11027         vdb_lock = None
11028         try:
11029                 if os.access(vdb_path, os.W_OK):
11030                         vdb_lock = portage.locks.lockdir(vdb_path)
11031                 realsyslist = sets["system"].getAtoms()
11032                 syslist = []
11033                 for x in realsyslist:
11034                         mycp = portage.dep_getkey(x)
11035                         if mycp in settings.getvirtuals():
11036                                 providers = []
11037                                 for provider in settings.getvirtuals()[mycp]:
11038                                         if vartree.dbapi.match(provider):
11039                                                 providers.append(provider)
11040                                 if len(providers) == 1:
11041                                         syslist.extend(providers)
11042                         else:
11043                                 syslist.append(mycp)
11044         
11045                 mysettings = portage.config(clone=settings)
11046         
11047                 if not unmerge_files:
11048                         if unmerge_action == "unmerge":
11049                                 print
11050                                 print bold("emerge unmerge") + " can only be used with specific package names"
11051                                 print
11052                                 return 0
11053                         else:
11054                                 global_unmerge = 1
11055         
11056                 localtree = vartree
11057                 # process all arguments and add all
11058                 # valid db entries to candidate_catpkgs
11059                 if global_unmerge:
11060                         if not unmerge_files:
11061                                 candidate_catpkgs.extend(vartree.dbapi.cp_all())
11062                 else:
11063                         # we've got command-line arguments
11064                         if not unmerge_files:
11065                                 print "\nNo packages to unmerge have been provided.\n"
11066                                 return 0
11067                         for x in unmerge_files:
11068                                 arg_parts = x.split('/')
11069                                 if x[0] not in [".","/"] and \
11070                                         arg_parts[-1][-7:] != ".ebuild":
11071                                         #possible cat/pkg or dep; treat as such
11072                                         candidate_catpkgs.append(x)
11073                                 elif unmerge_action in ["prune","clean"]:
11074                                         print "\n!!! Prune and clean do not accept individual" + \
11075                                                 " ebuilds as arguments;\n    skipping.\n"
11076                                         continue
11077                                 else:
11078                                         # it appears that the user is specifying an installed
11079                                         # ebuild and we're in "unmerge" mode, so it's ok.
11080                                         if not os.path.exists(x):
11081                                                 print "\n!!! The path '"+x+"' doesn't exist.\n"
11082                                                 return 0
11083         
11084                                         absx   = os.path.abspath(x)
11085                                         sp_absx = absx.split("/")
11086                                         if sp_absx[-1][-7:] == ".ebuild":
11087                                                 del sp_absx[-1]
11088                                                 absx = "/".join(sp_absx)
11089         
11090                                         sp_absx_len = len(sp_absx)
11091         
11092                                         vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
11093                                         vdb_len  = len(vdb_path)
11094         
11095                                         sp_vdb     = vdb_path.split("/")
11096                                         sp_vdb_len = len(sp_vdb)
11097         
11098                                         if not os.path.exists(absx+"/CONTENTS"):
11099                                                 print "!!! Not a valid db dir: "+str(absx)
11100                                                 return 0
11101         
11102                                         if sp_absx_len <= sp_vdb_len:
11103                                                 # The path is shorter, so it can't be inside the vdb.
11104                                                 print sp_absx
11105                                                 print absx
11106                                                 print "\n!!!",x,"cannot be inside "+ \
11107                                                         vdb_path+"; aborting.\n"
11108                                                 return 0
11109         
11110                                         for idx in range(0,sp_vdb_len):
11111                                                 if idx >= sp_absx_len or sp_vdb[idx] != sp_absx[idx]:
11112                                                         print sp_absx
11113                                                         print absx
11114                                                         print "\n!!!", x, "is not inside "+\
11115                                                                 vdb_path+"; aborting.\n"
11116                                                         return 0
11117         
11118                                         print "="+"/".join(sp_absx[sp_vdb_len:])
11119                                         candidate_catpkgs.append(
11120                                                 "="+"/".join(sp_absx[sp_vdb_len:]))
11121         
11122                 newline=""
11123                 if (not "--quiet" in myopts):
11124                         newline="\n"
11125                 if settings["ROOT"] != "/":
11126                         writemsg_level(darkgreen(newline+ \
11127                                 ">>> Using system located in ROOT tree %s\n" % \
11128                                 settings["ROOT"]))
11129
11130                 if (("--pretend" in myopts) or ("--ask" in myopts)) and \
11131                         not ("--quiet" in myopts):
11132                         writemsg_level(darkgreen(newline+\
11133                                 ">>> These are the packages that would be unmerged:\n"))
11134
11135                 # Preservation of order is required for --depclean and --prune so
11136                 # that dependencies are respected. Use all_selected to eliminate
11137                 # duplicate packages since the same package may be selected by
11138                 # multiple atoms.
11139                 pkgmap = []
11140                 all_selected = set()
11141                 for x in candidate_catpkgs:
11142                         # cycle through all our candidate deps and determine
11143                         # what will and will not get unmerged
11144                         try:
11145                                 mymatch = vartree.dbapi.match(x)
11146                         except portage.exception.AmbiguousPackageName, errpkgs:
11147                                 print "\n\n!!! The short ebuild name \"" + \
11148                                         x + "\" is ambiguous.  Please specify"
11149                                 print "!!! one of the following fully-qualified " + \
11150                                         "ebuild names instead:\n"
11151                                 for i in errpkgs[0]:
11152                                         print "    " + green(i)
11153                                 print
11154                                 sys.exit(1)
11155         
11156                         if not mymatch and x[0] not in "<>=~":
11157                                 mymatch = localtree.dep_match(x)
11158                         if not mymatch:
11159                                 portage.writemsg("\n--- Couldn't find '%s' to %s.\n" % \
11160                                         (x, unmerge_action), noiselevel=-1)
11161                                 continue
11162
11163                         pkgmap.append(
11164                                 {"protected": set(), "selected": set(), "omitted": set()})
11165                         mykey = len(pkgmap) - 1
11166                         if unmerge_action=="unmerge":
11167                                         for y in mymatch:
11168                                                 if y not in all_selected:
11169                                                         pkgmap[mykey]["selected"].add(y)
11170                                                         all_selected.add(y)
11171                         elif unmerge_action == "prune":
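                                              # Prune: protect the single best installed version
                                              # (preferring, on a slot collision, the one with the
                                              # highest counter, i.e. the most recently installed)
                                              # and select every other matching version for removal.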
11172                                 if len(mymatch) == 1:
11173                                         continue
11174                                 best_version = mymatch[0]
11175                                 best_slot = vartree.getslot(best_version)
11176                                 best_counter = vartree.dbapi.cpv_counter(best_version)
11177                                 for mypkg in mymatch[1:]:
11178                                         myslot = vartree.getslot(mypkg)
11179                                         mycounter = vartree.dbapi.cpv_counter(mypkg)
11180                                         if (myslot == best_slot and mycounter > best_counter) or \
11181                                                 mypkg == portage.best([mypkg, best_version]):
11182                                                 if myslot == best_slot:
11183                                                         if mycounter < best_counter:
11184                                                                 # On slot collision, keep the one with the
11185                                                                 # highest counter since it is the most
11186                                                                 # recently installed.
11187                                                                 continue
11188                                                 best_version = mypkg
11189                                                 best_slot = myslot
11190                                                 best_counter = mycounter
11191                                 pkgmap[mykey]["protected"].add(best_version)
11192                                 pkgmap[mykey]["selected"].update(mypkg for mypkg in mymatch \
11193                                         if mypkg != best_version and mypkg not in all_selected)
11194                                 all_selected.update(pkgmap[mykey]["selected"])
11195                         else:
11196                                 # unmerge_action == "clean"
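                                              # slotmap maps each SLOT to a {counter: cpv} dict; the
                                              # highest counter in each slot (the most recently
                                              # merged version) is protected below and the remaining
                                              # matched versions are selected for removal.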
11197                                 slotmap={}
11198                                 for mypkg in mymatch:
11199                                         if unmerge_action == "clean":
11200                                                 myslot = localtree.getslot(mypkg)
11201                                         else:
11202                                                 # since we're pruning, we don't care about slots
11203                                                 # and put all the pkgs in together
11204                                                 myslot = 0
11205                                         if myslot not in slotmap:
11206                                                 slotmap[myslot] = {}
11207                                         slotmap[myslot][localtree.dbapi.cpv_counter(mypkg)] = mypkg
11208
11209                                 for mypkg in vartree.dbapi.cp_list(
11210                                         portage.dep_getkey(mymatch[0])):
11211                                         myslot = vartree.getslot(mypkg)
11212                                         if myslot not in slotmap:
11213                                                 slotmap[myslot] = {}
11214                                         slotmap[myslot][vartree.dbapi.cpv_counter(mypkg)] = mypkg
11215
11216                                 for myslot in slotmap:
11217                                         counterkeys = slotmap[myslot].keys()
11218                                         if not counterkeys:
11219                                                 continue
11220                                         counterkeys.sort()
11221                                         pkgmap[mykey]["protected"].add(
11222                                                 slotmap[myslot][counterkeys[-1]])
11223                                         del counterkeys[-1]
11224
11225                                         for counter in counterkeys[:]:
11226                                                 mypkg = slotmap[myslot][counter]
11227                                                 if mypkg not in mymatch:
11228                                                         counterkeys.remove(counter)
11229                                                         pkgmap[mykey]["protected"].add(
11230                                                                 slotmap[myslot][counter])
11231
11232                                         # be pretty and get them in order of merge:
11233                                         for ckey in counterkeys:
11234                                                 mypkg = slotmap[myslot][ckey]
11235                                                 if mypkg not in all_selected:
11236                                                         pkgmap[mykey]["selected"].add(mypkg)
11237                                                         all_selected.add(mypkg)
11238                                         # ok, now the last-merged package
11239                                         # is protected, and the rest are selected
11240                 numselected = len(all_selected)
11241                 if global_unmerge and not numselected:
11242                         portage.writemsg_stdout("\n>>> No outdated packages were found on your system.\n")
11243                         return 0
11244         
11245                 if not numselected:
11246                         portage.writemsg_stdout(
11247                                 "\n>>> No packages selected for removal by " + \
11248                                 unmerge_action + "\n")
11249                         return 0
11250         finally:
11251                 if vdb_lock:
11252                         vartree.dbapi.flush_cache()
11253                         portage.locks.unlockdir(vdb_lock)
11254         
11255         from portage.sets.base import EditablePackageSet
11256         
11257         # generate a list of package sets that are directly or indirectly listed in "world",
11258         # as there is no persistent list of "installed" sets
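              # This is a simple fixed-point expansion: starting from "world", keep
              # appending any nested set names (SETPREFIX references) found in the sets
              # collected so far until a pass adds nothing new.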
11259         installed_sets = ["world"]
11260         stop = False
11261         pos = 0
11262         while not stop:
11263                 stop = True
11264                 pos = len(installed_sets)
11265                 for s in installed_sets[pos - 1:]:
11266                         if s not in sets:
11267                                 continue
11268                         candidates = [x[len(SETPREFIX):] for x in sets[s].getNonAtoms() if x.startswith(SETPREFIX)]
11269                         if candidates:
11270                                 stop = False
11271                                 installed_sets += candidates
11272         installed_sets = [x for x in installed_sets if x not in root_config.setconfig.active]
11273         del stop, pos
11274
11275         # We don't want to unmerge packages that are still listed in user-editable package sets
11276         # reachable from "world", as they would be re-merged on the next update of "world" or of
11277         # the relevant package sets.
11278         unknown_sets = set()
11279         for cp in xrange(len(pkgmap)):
11280                 for cpv in pkgmap[cp]["selected"].copy():
11281                         try:
11282                                 pkg = _pkg(cpv)
11283                         except KeyError:
11284                                 # It could have been uninstalled
11285                                 # by a concurrent process.
11286                                 continue
11287
11288                         if unmerge_action != "clean" and \
11289                                 root_config.root == "/" and \
11290                                 portage.match_from_list(
11291                                 portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
11292                                 msg = ("Not unmerging package %s since there is no valid " + \
11293                                 "reason for portage to unmerge itself.") % (pkg.cpv,)
11294                                 for line in textwrap.wrap(msg, 75):
11295                                         out.eerror(line)
11296                                 # adjust pkgmap so the display output is correct
11297                                 pkgmap[cp]["selected"].remove(cpv)
11298                                 all_selected.remove(cpv)
11299                                 pkgmap[cp]["protected"].add(cpv)
11300                                 continue
11301
11302                         parents = []
11303                         for s in installed_sets:
11304                                 # skip sets that the user requested to unmerge, and skip world 
11305                                 # unless we're unmerging a package set (as the package would be 
11306                                 # removed from "world" later on)
11307                                 if s in root_config.setconfig.active or (s == "world" and not root_config.setconfig.active):
11308                                         continue
11309
11310                                 if s not in sets:
11311                                         if s in unknown_sets:
11312                                                 continue
11313                                         unknown_sets.add(s)
11314                                         out = portage.output.EOutput()
11315                                         out.eerror(("Unknown set '@%s' in " + \
11316                                                 "%svar/lib/portage/world_sets") % \
11317                                                 (s, root_config.root))
11318                                         continue
11319
11320                                 # only check instances of EditablePackageSet as other classes are generally used for
11321                                 # special purposes and can be ignored here (and are usually generated dynamically, so the
11322                                 # user can't do much about them anyway)
11323                                 if isinstance(sets[s], EditablePackageSet):
11324
11325                                         # This is derived from a snippet of code in the
11326                                         # depgraph._iter_atoms_for_pkg() method.
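					# If a higher-versioned installed package in a different
					# slot also satisfies the set atom (the "higher_slot" case
					# below), that package keeps the set satisfied, so the set
					# is not recorded as a parent of pkg.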
11327                                         for atom in sets[s].iterAtomsForPackage(pkg):
11328                                                 inst_matches = vartree.dbapi.match(atom)
11329                                                 inst_matches.reverse() # descending order
11330                                                 higher_slot = None
11331                                                 for inst_cpv in inst_matches:
11332                                                         try:
11333                                                                 inst_pkg = _pkg(inst_cpv)
11334                                                         except KeyError:
11335                                                                 # It could have been uninstalled
11336                                                                 # by a concurrent process.
11337                                                                 continue
11338
11339                                                         if inst_pkg.cp != atom.cp:
11340                                                                 continue
11341                                                         if pkg >= inst_pkg:
11342                                                                 # This is descending order, and we're not
11343                                                         # interested in any versions <= the given pkg.
11344                                                                 break
11345                                                         if pkg.slot_atom != inst_pkg.slot_atom:
11346                                                                 higher_slot = inst_pkg
11347                                                                 break
11348                                                 if higher_slot is None:
11349                                                         parents.append(s)
11350                                                         break
11351                         if parents:
11352                                 #print colorize("WARN", "Package %s is going to be unmerged," % cpv)
11353                                 #print colorize("WARN", "but still listed in the following package sets:")
11354                                 #print "    %s\n" % ", ".join(parents)
11355                                 print colorize("WARN", "Not unmerging package %s as it is" % cpv)
11356                                 print colorize("WARN", "still referenced by the following package sets:")
11357                                 print "    %s\n" % ", ".join(parents)
11358                                 # adjust pkgmap so the display output is correct
11359                                 pkgmap[cp]["selected"].remove(cpv)
11360                                 all_selected.remove(cpv)
11361                                 pkgmap[cp]["protected"].add(cpv)
11362         
11363         del installed_sets
11364
11365         numselected = len(all_selected)
11366         if not numselected:
11367                 writemsg_level(
11368                         "\n>>> No packages selected for removal by " + \
11369                         unmerge_action + "\n")
11370                 return 0
11371
11372         # Unmerge order only matters in some cases
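	# When order doesn't matter, entries that share the same ${CATEGORY}/${PN}
	# are collapsed into a single pkgmap entry below (illustrative: two selected
	# slots of a hypothetical sys-devel/foo end up displayed as one entry).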
11373         if not ordered:
11374                 unordered = {}
11375                 for d in pkgmap:
11376                         selected = d["selected"]
11377                         if not selected:
11378                                 continue
11379                         cp = portage.cpv_getkey(iter(selected).next())
11380                         cp_dict = unordered.get(cp)
11381                         if cp_dict is None:
11382                                 cp_dict = {}
11383                                 unordered[cp] = cp_dict
11384                                 for k in d:
11385                                         cp_dict[k] = set()
11386                         for k, v in d.iteritems():
11387                                 cp_dict[k].update(v)
11388                 pkgmap = [unordered[cp] for cp in sorted(unordered)]
11389
11390         for x in xrange(len(pkgmap)):
11391                 selected = pkgmap[x]["selected"]
11392                 if not selected:
11393                         continue
11394                 for mytype, mylist in pkgmap[x].iteritems():
11395                         if mytype == "selected":
11396                                 continue
11397                         mylist.difference_update(all_selected)
11398                 cp = portage.cpv_getkey(iter(selected).next())
11399                 for y in localtree.dep_match(cp):
11400                         if y not in pkgmap[x]["omitted"] and \
11401                                 y not in pkgmap[x]["selected"] and \
11402                                 y not in pkgmap[x]["protected"] and \
11403                                 y not in all_selected:
11404                                 pkgmap[x]["omitted"].add(y)
11405                 if global_unmerge and not pkgmap[x]["selected"]:
11406                         #avoid cluttering the preview printout with stuff that isn't getting unmerged
11407                         continue
11408                 if not (pkgmap[x]["protected"] or pkgmap[x]["omitted"]) and cp in syslist:
11409                         writemsg_level(colorize("BAD","\a\n\n!!! " + \
11410                                 "'%s' is part of your system profile.\n" % cp),
11411                                 level=logging.WARNING, noiselevel=-1)
11412                         writemsg_level(colorize("WARN","\a!!! Unmerging it may " + \
11413                                 "be damaging to your system.\n\n"),
11414                                 level=logging.WARNING, noiselevel=-1)
11415                         if clean_delay and "--pretend" not in myopts and "--ask" not in myopts:
11416                                 countdown(int(settings["EMERGE_WARNING_DELAY"]),
11417                                         colorize("UNMERGE_WARN", "Press Ctrl-C to Stop"))
11418                 if not quiet:
11419                         writemsg_level("\n %s\n" % (bold(cp),), noiselevel=-1)
11420                 else:
11421                         writemsg_level(bold(cp) + ": ", noiselevel=-1)
11422                 for mytype in ["selected","protected","omitted"]:
11423                         if not quiet:
11424                                 writemsg_level((mytype + ": ").rjust(14), noiselevel=-1)
11425                         if pkgmap[x][mytype]:
11426                                 sorted_pkgs = [portage.catpkgsplit(mypkg)[1:] for mypkg in pkgmap[x][mytype]]
11427                                 sorted_pkgs.sort(portage.pkgcmp)
11428                                 for pn, ver, rev in sorted_pkgs:
11429                                         if rev == "r0":
11430                                                 myversion = ver
11431                                         else:
11432                                                 myversion = ver + "-" + rev
11433                                         if mytype == "selected":
11434                                                 writemsg_level(
11435                                                         colorize("UNMERGE_WARN", myversion + " "),
11436                                                         noiselevel=-1)
11437                                         else:
11438                                                 writemsg_level(
11439                                                         colorize("GOOD", myversion + " "), noiselevel=-1)
11440                         else:
11441                                 writemsg_level("none ", noiselevel=-1)
11442                         if not quiet:
11443                                 writemsg_level("\n", noiselevel=-1)
11444                 if quiet:
11445                         writemsg_level("\n", noiselevel=-1)
11446
11447         writemsg_level("\n>>> " + colorize("UNMERGE_WARN", "'Selected'") + \
11448                 " packages are slated for removal.\n")
11449         writemsg_level(">>> " + colorize("GOOD", "'Protected'") + \
11450                         " and " + colorize("GOOD", "'omitted'") + \
11451                         " packages will not be removed.\n\n")
11452
11453         if "--pretend" in myopts:
11454                 #we're done... return
11455                 return 0
11456         if "--ask" in myopts:
11457                 if userquery("Would you like to unmerge these packages?")=="No":
11458                         # enter pretend mode for correct formatting of results
11459                         myopts["--pretend"] = True
11460                         print
11461                         print "Quitting."
11462                         print
11463                         return 0
11464         #the real unmerging begins, after a short delay....
11465         if clean_delay and not autoclean:
11466                 countdown(int(settings["CLEAN_DELAY"]), ">>> Unmerging")
11467
11468         for x in xrange(len(pkgmap)):
11469                 for y in pkgmap[x]["selected"]:
11470                         writemsg_level(">>> Unmerging "+y+"...\n", noiselevel=-1)
11471                         emergelog(xterm_titles, "=== Unmerging... ("+y+")")
11472                         mysplit = y.split("/")
11473                         #unmerge...
11474                         retval = portage.unmerge(mysplit[0], mysplit[1], settings["ROOT"],
11475                                 mysettings, unmerge_action not in ["clean","prune"],
11476                                 vartree=vartree, ldpath_mtimes=ldpath_mtimes,
11477                                 scheduler=scheduler)
11478
11479                         if retval != os.EX_OK:
11480                                 emergelog(xterm_titles, " !!! unmerge FAILURE: "+y)
11481                                 if raise_on_error:
11482                                         raise UninstallFailure(retval)
11483                                 sys.exit(retval)
11484                         else:
11485                                 if clean_world and hasattr(sets["world"], "cleanPackage"):
11486                                         sets["world"].cleanPackage(vartree.dbapi, y)
11487                                 emergelog(xterm_titles, " >>> unmerge success: "+y)
11488         if clean_world and hasattr(sets["world"], "remove"):
11489                 for s in root_config.setconfig.active:
11490                         sets["world"].remove(SETPREFIX+s)
11491         return 1
11492
11493 def chk_updated_info_files(root, infodirs, prev_mtimes, retval):
11494
11495         if os.path.exists("/usr/bin/install-info"):
11496                 out = portage.output.EOutput()
11497                 regen_infodirs=[]
11498                 for z in infodirs:
11499                         if z=='':
11500                                 continue
11501                         inforoot=normpath(root+z)
11502                         if os.path.isdir(inforoot):
11503                                 infomtime = long(os.stat(inforoot).st_mtime)
11504                                 if inforoot not in prev_mtimes or \
11505                                         prev_mtimes[inforoot] != infomtime:
11506                                                 regen_infodirs.append(inforoot)
11507
11508                 if not regen_infodirs:
11509                         portage.writemsg_stdout("\n")
11510                         out.einfo("GNU info directory index is up-to-date.")
11511                 else:
11512                         portage.writemsg_stdout("\n")
11513                         out.einfo("Regenerating GNU info directory index...")
11514
11515                         dir_extensions = ("", ".gz", ".bz2")
11516                         icount=0
11517                         badcount=0
11518                         errmsg = ""
11519                         for inforoot in regen_infodirs:
11520                                 if inforoot=='':
11521                                         continue
11522
11523                                 if not os.path.isdir(inforoot) or \
11524                                         not os.access(inforoot, os.W_OK):
11525                                         continue
11526
11527                                 file_list = os.listdir(inforoot)
11528                                 file_list.sort()
11529                                 dir_file = os.path.join(inforoot, "dir")
11530                                 moved_old_dir = False
11531                                 processed_count = 0
11532                                 for x in file_list:
11533                                         if x.startswith(".") or \
11534                                                 os.path.isdir(os.path.join(inforoot, x)):
11535                                                 continue
11536                                         if x.startswith("dir"):
11537                                                 skip = False
11538                                                 for ext in dir_extensions:
11539                                                         if x == "dir" + ext or \
11540                                                                 x == "dir" + ext + ".old":
11541                                                                 skip = True
11542                                                                 break
11543                                                 if skip:
11544                                                         continue
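					# Before processing the first info file, move any existing
					# "dir" index aside so that install-info rebuilds it from
					# scratch; it is restored below if no new index is created.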
11545                                         if processed_count == 0:
11546                                                 for ext in dir_extensions:
11547                                                         try:
11548                                                                 os.rename(dir_file + ext, dir_file + ext + ".old")
11549                                                                 moved_old_dir = True
11550                                                         except EnvironmentError, e:
11551                                                                 if e.errno != errno.ENOENT:
11552                                                                         raise
11553                                                                 del e
11554                                         processed_count += 1
11555                                         myso=commands.getstatusoutput("LANG=C LANGUAGE=C /usr/bin/install-info --dir-file="+inforoot+"/dir "+inforoot+"/"+x)[1]
11556                                         existsstr="already exists, for file `"
11557                                         if myso!="":
11558                                                 if re.search(existsstr,myso):
11559                                                         # Already exists... Don't increment the count for this.
11560                                                         pass
11561                                                 elif myso[:44]=="install-info: warning: no info dir entry in ":
11562                                                         # This info file doesn't contain a DIR-header: install-info produces this
11563                                                         # (harmless) warning (the --quiet switch doesn't seem to work).
11564                                                         # Don't increment the count for this.
11565                                                         pass
11566                                                 else:
11567                                                         badcount=badcount+1
11568                                                         errmsg += myso + "\n"
11569                                         icount=icount+1
11570
11571                                 if moved_old_dir and not os.path.exists(dir_file):
11572                                         # We didn't generate a new dir file, so put the old file
11573                                         # back where it was originally found.
11574                                         for ext in dir_extensions:
11575                                                 try:
11576                                                         os.rename(dir_file + ext + ".old", dir_file + ext)
11577                                                 except EnvironmentError, e:
11578                                                         if e.errno != errno.ENOENT:
11579                                                                 raise
11580                                                         del e
11581
11582                                 # Clean up dir.old cruft so that it doesn't prevent
11583                                 # unmerge of otherwise empty directories.
11584                                 for ext in dir_extensions:
11585                                         try:
11586                                                 os.unlink(dir_file + ext + ".old")
11587                                         except EnvironmentError, e:
11588                                                 if e.errno != errno.ENOENT:
11589                                                         raise
11590                                                 del e
11591
11592                                 #update mtime so we can potentially avoid regenerating.
11593                                 prev_mtimes[inforoot] = long(os.stat(inforoot).st_mtime)
11594
11595                         if badcount:
11596                                 out.eerror("Processed %d info files; %d errors." % \
11597                                         (icount, badcount))
11598                                 writemsg_level(errmsg, level=logging.ERROR, noiselevel=-1)
11599                         else:
11600                                 if icount > 0:
11601                                         out.einfo("Processed %d info files." % (icount,))
11602
11603
11604 def display_news_notification(root_config, myopts):
11605         target_root = root_config.root
11606         trees = root_config.trees
11607         settings = trees["vartree"].settings
11608         portdb = trees["porttree"].dbapi
11609         vardb = trees["vartree"].dbapi
11610         NEWS_PATH = os.path.join("metadata", "news")
11611         UNREAD_PATH = os.path.join(target_root, NEWS_LIB_PATH, "news")
11612         newsReaderDisplay = False
11613         update = "--pretend" not in myopts
11614
11615         for repo in portdb.getRepositories():
11616                 unreadItems = checkUpdatedNewsItems(
11617                         portdb, vardb, NEWS_PATH, UNREAD_PATH, repo, update=update)
11618                 if unreadItems:
11619                         if not newsReaderDisplay:
11620                                 newsReaderDisplay = True
11621                                 print
11622                         print colorize("WARN", " * IMPORTANT:"),
11623                         print "%s news items need reading for repository '%s'." % (unreadItems, repo)
11624                         
11625         
11626         if newsReaderDisplay:
11627                 print colorize("WARN", " *"),
11628                 print "Use " + colorize("GOOD", "eselect news") + " to read news items."
11629                 print
11630
11631 def display_preserved_libs(vardbapi):
11632         MAX_DISPLAY = 3
11633
11634         # Ensure the registry is consistent with existing files.
11635         vardbapi.plib_registry.pruneNonExisting()
11636
11637         if vardbapi.plib_registry.hasEntries():
11638                 print
11639                 print colorize("WARN", "!!!") + " existing preserved libs:"
11640                 plibdata = vardbapi.plib_registry.getPreservedLibs()
11641                 linkmap = vardbapi.linkmap
11642                 consumer_map = {}
11643                 owners = {}
11644                 linkmap_broken = False
11645
11646                 try:
11647                         linkmap.rebuild()
11648                 except portage.exception.CommandNotFound, e:
11649                         writemsg_level("!!! Command Not Found: %s\n" % (e,),
11650                                 level=logging.ERROR, noiselevel=-1)
11651                         del e
11652                         linkmap_broken = True
11653                 else:
11654                         search_for_owners = set()
11655                         for cpv in plibdata:
11656                                 internal_plib_keys = set(linkmap._obj_key(f) \
11657                                         for f in plibdata[cpv])
11658                                 for f in plibdata[cpv]:
11659                                         if f in consumer_map:
11660                                                 continue
11661                                         consumers = []
11662                                         for c in linkmap.findConsumers(f):
11663                                                 # Filter out any consumers that are also preserved libs
11664                                                 # belonging to the same package as the provider.
11665                                                 if linkmap._obj_key(c) not in internal_plib_keys:
11666                                                         consumers.append(c)
11667                                         consumers.sort()
11668                                         consumer_map[f] = consumers
11669                                         search_for_owners.update(consumers[:MAX_DISPLAY+1])
11670
11671                         owners = vardbapi._owners.getFileOwnerMap(search_for_owners)
11672
11673                 for cpv in plibdata:
11674                         print colorize("WARN", ">>>") + " package: %s" % cpv
11675                         samefile_map = {}
11676                         for f in plibdata[cpv]:
11677                                 obj_key = linkmap._obj_key(f)
11678                                 alt_paths = samefile_map.get(obj_key)
11679                                 if alt_paths is None:
11680                                         alt_paths = set()
11681                                         samefile_map[obj_key] = alt_paths
11682                                 alt_paths.add(f)
11683
11684                         for alt_paths in samefile_map.itervalues():
11685                                 alt_paths = sorted(alt_paths)
11686                                 for p in alt_paths:
11687                                         print colorize("WARN", " * ") + " - %s" % (p,)
11688                                 f = alt_paths[0]
11689                                 consumers = consumer_map.get(f, [])
11690                                 for c in consumers[:MAX_DISPLAY]:
11691                                         print colorize("WARN", " * ") + "     used by %s (%s)" % \
11692                                                 (c, ", ".join(x.mycpv for x in owners.get(c, [])))
11693                                 if len(consumers) == MAX_DISPLAY + 1:
11694                                         print colorize("WARN", " * ") + "     used by %s (%s)" % \
11695                                                 (consumers[MAX_DISPLAY], ", ".join(x.mycpv \
11696                                                 for x in owners.get(consumers[MAX_DISPLAY], [])))
11697                                 elif len(consumers) > MAX_DISPLAY:
11698                                         print colorize("WARN", " * ") + "     used by %d other files" % (len(consumers) - MAX_DISPLAY)
11699                 print "Use " + colorize("GOOD", "emerge @preserved-rebuild") + " to rebuild packages using these libraries"
11700
11701
11702 def _flush_elog_mod_echo():
11703         """
11704         Dump the mod_echo output now so that our other
11705         notifications are shown last.
11706         @rtype: bool
11707         @returns: True if messages were shown, False otherwise.
11708         """
11709         messages_shown = False
11710         try:
11711                 from portage.elog import mod_echo
11712         except ImportError:
11713                 pass # happens during downgrade to a version without the module
11714         else:
11715                 messages_shown = bool(mod_echo._items)
11716                 mod_echo.finalize()
11717         return messages_shown
11718
11719 def post_emerge(root_config, myopts, mtimedb, retval):
11720         """
11721         Misc. things to run at the end of a merge session.
11722         
11723         Update Info Files
11724         Update Config Files
11725         Update News Items
11726         Commit mtimeDB
11727         Display preserved libs warnings
11728         Exit Emerge
11729
11730         @param root_config: The RootConfig for the target ROOT, providing access to its package databases
11731         @type root_config: RootConfig
11732         @param mtimedb: The mtimeDB to store data needed across merge invocations
11733         @type mtimedb: MtimeDB class instance
11734         @param retval: Emerge's return value
11735         @type retval: Int
11736         @rtype: None
11737         @returns:
11738         1.  Calls sys.exit(retval)
11739         """
11740
11741         target_root = root_config.root
11742         trees = { target_root : root_config.trees }
11743         vardbapi = trees[target_root]["vartree"].dbapi
11744         settings = vardbapi.settings
11745         info_mtimes = mtimedb["info"]
11746
11747         # Load the most current variables from ${ROOT}/etc/profile.env
11748         settings.unlock()
11749         settings.reload()
11750         settings.regenerate()
11751         settings.lock()
11752
11753         config_protect = settings.get("CONFIG_PROTECT","").split()
11754         infodirs = settings.get("INFOPATH","").split(":") + \
11755                 settings.get("INFODIR","").split(":")
11756
11757         os.chdir("/")
11758
11759         if retval == os.EX_OK:
11760                 exit_msg = " *** exiting successfully."
11761         else:
11762                 exit_msg = " *** exiting unsuccessfully with status '%s'." % retval
11763         emergelog("notitles" not in settings.features, exit_msg)
11764
11765         _flush_elog_mod_echo()
11766
11767         counter_hash = settings.get("PORTAGE_COUNTER_HASH")
11768         if counter_hash is not None and \
11769                 counter_hash == vardbapi._counter_hash():
11770                 display_news_notification(root_config, myopts)
11771                 # If vdb state has not changed then there's nothing else to do.
11772                 sys.exit(retval)
11773
11774         vdb_path = os.path.join(target_root, portage.VDB_PATH)
11775         portage.util.ensure_dirs(vdb_path)
11776         vdb_lock = None
11777         if os.access(vdb_path, os.W_OK) and not "--pretend" in myopts:
11778                 vdb_lock = portage.locks.lockdir(vdb_path)
11779
11780         if vdb_lock:
11781                 try:
11782                         if "noinfo" not in settings.features:
11783                                 chk_updated_info_files(target_root,
11784                                         infodirs, info_mtimes, retval)
11785                         mtimedb.commit()
11786                 finally:
11787                         if vdb_lock:
11788                                 portage.locks.unlockdir(vdb_lock)
11789
11790         chk_updated_cfg_files(target_root, config_protect)
11791         
11792         display_news_notification(root_config, myopts)
11793         if retval in (None, os.EX_OK) or (not "--pretend" in myopts):
11794                 display_preserved_libs(vardbapi)        
11795
11796         sys.exit(retval)
11797
11798
11799 def chk_updated_cfg_files(target_root, config_protect):
11800         if config_protect:
11801                 #number of directories with some protect files in them
11802                 procount=0
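		# Pending configuration updates are saved by Portage as
		# "._cfgXXXX_<name>" files (e.g. a hypothetical ._cfg0000_make.conf);
		# the find commands below search for them.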
11803                 for x in config_protect:
11804                         x = os.path.join(target_root, x.lstrip(os.path.sep))
11805                         if not os.access(x, os.W_OK):
11806                                 # Avoid Permission denied errors generated
11807                                 # later by `find`.
11808                                 continue
11809                         try:
11810                                 mymode = os.lstat(x).st_mode
11811                         except OSError:
11812                                 continue
11813                         if stat.S_ISLNK(mymode):
11814                                 # We want to treat it like a directory if it
11815                                 # is a symlink to an existing directory.
11816                                 try:
11817                                         real_mode = os.stat(x).st_mode
11818                                         if stat.S_ISDIR(real_mode):
11819                                                 mymode = real_mode
11820                                 except OSError:
11821                                         pass
11822                         if stat.S_ISDIR(mymode):
11823                                 mycommand = "find '%s' -name '.*' -type d -prune -o -name '._cfg????_*'" % x
11824                         else:
11825                                 mycommand = "find '%s' -maxdepth 1 -name '._cfg????_%s'" % \
11826                                         os.path.split(x.rstrip(os.path.sep))
11827                         mycommand += " ! -name '.*~' ! -iname '.*.bak' -print0"
11828                         a = commands.getstatusoutput(mycommand)
11829                         if a[0] != 0:
11830                                 sys.stderr.write(" %s error scanning '%s': " % (bad("*"), x))
11831                                 sys.stderr.flush()
11832                                 # Show the error message alone, sending stdout to /dev/null.
11833                                 os.system(mycommand + " 1>/dev/null")
11834                         else:
11835                                 files = a[1].split('\0')
11836                                 # split always produces an empty string as the last element
11837                                 if files and not files[-1]:
11838                                         del files[-1]
11839                                 if files:
11840                                         procount += 1
11841                                         print "\n"+colorize("WARN", " * IMPORTANT:"),
11842                                         if stat.S_ISDIR(mymode):
11843                                                  print "%d config files in '%s' need updating." % \
11844                                                         (len(files), x)
11845                                         else:
11846                                                  print "config file '%s' needs updating." % x
11847
11848                 if procount:
11849                         print " "+yellow("*")+" See the "+colorize("INFORM","CONFIGURATION FILES")+ \
11850                                 " section of the " + bold("emerge")
11851                         print " "+yellow("*")+" man page to learn how to update config files."
11852
11853 def checkUpdatedNewsItems(portdb, vardb, NEWS_PATH, UNREAD_PATH, repo_id,
11854         update=False):
11855         """
11856         Examines news items in repodir + '/' + NEWS_PATH and attempts to find unread items.
11857         Returns the number of unread (but relevant) items.
11858         
11859         @param portdb: a portage tree database
11860         @type portdb: portdbapi
11861         @param vardb: an installed package database
11862         @type vardb: vardbapi
11863         @param NEWS_PATH:
11864         @type NEWS_PATH:
11865         @param UNREAD_PATH:
11866         @type UNREAD_PATH:
11867         @param repo_id:
11868         @type repo_id:
11869         @rtype: Integer
11870         @returns:
11871         1.  The number of unread but relevant news items.
11872         
11873         """
11874         from portage.news import NewsManager
11875         manager = NewsManager(portdb, vardb, NEWS_PATH, UNREAD_PATH)
11876         return manager.getUnreadItems( repo_id, update=update )
11877
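# Illustrative example (hypothetical atom): insert_category_into_atom(">=foo-1.2", "app-misc")
# returns ">=app-misc/foo-1.2"; if the atom contains no word character, None is returned.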
11878 def insert_category_into_atom(atom, category):
11879         alphanum = re.search(r'\w', atom)
11880         if alphanum:
11881                 ret = atom[:alphanum.start()] + "%s/" % category + \
11882                         atom[alphanum.start():]
11883         else:
11884                 ret = None
11885         return ret
11886
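# For atoms given without a category, a dummy "cat/" prefix is inserted so that
# portage.isvalidatom() can still validate them, e.g. ">=foo-1.0" is checked as
# ">=cat/foo-1.0" (illustrative input).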
11887 def is_valid_package_atom(x):
11888         if "/" not in x:
11889                 alphanum = re.search(r'\w', x)
11890                 if alphanum:
11891                         x = x[:alphanum.start()] + "cat/" + x[alphanum.start():]
11892         return portage.isvalidatom(x)
11893
11894 def show_blocker_docs_link():
11895         print
11896         print "For more information about " + bad("Blocked Packages") + ", please refer to the following"
11897         print "section of the Gentoo Linux x86 Handbook (architecture is irrelevant):"
11898         print
11899         print "http://www.gentoo.org/doc/en/handbook/handbook-x86.xml?full=1#blocked"
11900         print
11901
11902 def show_mask_docs():
11903         print "For more information, see the MASKED PACKAGES section in the emerge"
11904         print "man page or refer to the Gentoo Handbook."
11905
11906 def action_sync(settings, trees, mtimedb, myopts, myaction):
11907         xterm_titles = "notitles" not in settings.features
11908         emergelog(xterm_titles, " === sync")
11909         myportdir = settings.get("PORTDIR", None)
11910         out = portage.output.EOutput()
11911         if not myportdir:
11912                 sys.stderr.write("!!! PORTDIR is undefined.  Is /etc/make.globals missing?\n")
11913                 sys.exit(1)
11914         if myportdir[-1]=="/":
11915                 myportdir=myportdir[:-1]
11916         try:
11917                 st = os.stat(myportdir)
11918         except OSError:
11919                 st = None
11920         if st is None:
11921                 print ">>>",myportdir,"not found, creating it."
11922                 os.makedirs(myportdir,0755)
11923                 st = os.stat(myportdir)
11924
11925         spawn_kwargs = {}
11926         spawn_kwargs["env"] = settings.environ()
11927         if portage.data.secpass >= 2 and \
11928                 (st.st_uid != os.getuid() and st.st_mode & 0700 or \
11929                 st.st_gid != os.getgid() and st.st_mode & 0070):
11930                 try:
11931                         homedir = pwd.getpwuid(st.st_uid).pw_dir
11932                 except KeyError:
11933                         pass
11934                 else:
11935                         # Drop privileges when syncing, in order to match
11936                         # existing uid/gid settings.
11937                         spawn_kwargs["uid"]    = st.st_uid
11938                         spawn_kwargs["gid"]    = st.st_gid
11939                         spawn_kwargs["groups"] = [st.st_gid]
11940                         spawn_kwargs["env"]["HOME"] = homedir
11941                         umask = 0002
11942                         if not st.st_mode & 0020:
11943                                 umask = umask | 0020
11944                         spawn_kwargs["umask"] = umask
11945
11946         syncuri = settings.get("SYNC", "").strip()
11947         if not syncuri:
11948                 writemsg_level("!!! SYNC is undefined. Is /etc/make.globals missing?\n",
11949                         noiselevel=-1, level=logging.ERROR)
11950                 return 1
11951
11952         vcs_dirs = frozenset([".git", ".svn", "CVS", ".hg"])
11953         vcs_dirs = vcs_dirs.intersection(os.listdir(myportdir))
11954
11955         os.umask(0022)
11956         dosyncuri = syncuri
11957         updatecache_flg = False
11958         if myaction == "metadata":
11959                 print "skipping sync"
11960                 updatecache_flg = True
11961         elif ".git" in vcs_dirs:
11962                 # Update existing git repository, and ignore the syncuri. We are
11963                 # going to trust the user and assume that the user is in the branch
11964                 # that he/she wants updated. We'll let the user manage branches with
11965                 # git directly.
11966                 if portage.process.find_binary("git") is None:
11967                         msg = ["Command not found: git",
11968                         "Type \"emerge dev-util/git\" to enable git support."]
11969                         for l in msg:
11970                                 writemsg_level("!!! %s\n" % l,
11971                                         level=logging.ERROR, noiselevel=-1)
11972                         return 1
11973                 msg = ">>> Starting git pull in %s..." % myportdir
11974                 emergelog(xterm_titles, msg )
11975                 writemsg_level(msg + "\n")
11976                 exitcode = portage.process.spawn_bash("cd %s ; git pull" % \
11977                         (portage._shell_quote(myportdir),), **spawn_kwargs)
11978                 if exitcode != os.EX_OK:
11979                         msg = "!!! git pull error in %s." % myportdir
11980                         emergelog(xterm_titles, msg)
11981                         writemsg_level(msg + "\n", level=logging.ERROR, noiselevel=-1)
11982                         return exitcode
11983                 msg = ">>> Git pull in %s successful" % myportdir
11984                 emergelog(xterm_titles, msg)
11985                 writemsg_level(msg + "\n")
11986                 exitcode = git_sync_timestamps(settings, myportdir)
11987                 if exitcode == os.EX_OK:
11988                         updatecache_flg = True
11989         elif syncuri[:8]=="rsync://":
11990                 for vcs_dir in vcs_dirs:
11991                         writemsg_level(("!!! %s appears to be under revision " + \
11992                                 "control (contains %s).\n!!! Aborting rsync sync.\n") % \
11993                                 (myportdir, vcs_dir), level=logging.ERROR, noiselevel=-1)
11994                         return 1
11995                 if not os.path.exists("/usr/bin/rsync"):
11996                         print "!!! /usr/bin/rsync does not exist, so rsync support is disabled."
11997                         print "!!! Type \"emerge net-misc/rsync\" to enable rsync support."
11998                         sys.exit(1)
11999                 mytimeout=180
12000
12001                 rsync_opts = []
12002                 import shlex, StringIO
12003                 if settings["PORTAGE_RSYNC_OPTS"] == "":
12004                         portage.writemsg("PORTAGE_RSYNC_OPTS empty or unset, using hardcoded defaults\n")
12005                         rsync_opts.extend([
12006                                 "--recursive",    # Recurse directories
12007                                 "--links",        # Consider symlinks
12008                                 "--safe-links",   # Ignore links outside of tree
12009                                 "--perms",        # Preserve permissions
12010                                 "--times",        # Preserive mod times
12011                                 "--compress",     # Compress the data transmitted
12012                                 "--force",        # Force deletion on non-empty dirs
12013                                 "--whole-file",   # Don't do block transfers, only entire files
12014                                 "--delete",       # Delete files that aren't in the master tree
12015                                 "--stats",        # Show final statistics about what was transfered
12016                                 "--timeout="+str(mytimeout), # IO timeout if not done in X seconds
12017                                 "--exclude=/distfiles",   # Exclude distfiles from consideration
12018                                 "--exclude=/local",       # Exclude local     from consideration
12019                                 "--exclude=/packages",    # Exclude packages  from consideration
12020                         ])
12021
12022                 else:
12023                         # The below validation is not needed when using the above hardcoded
12024                         # defaults.
12025
12026                         portage.writemsg("Using PORTAGE_RSYNC_OPTS instead of hardcoded defaults\n", 1)
12027                         lexer = shlex.shlex(StringIO.StringIO(
12028                                 settings.get("PORTAGE_RSYNC_OPTS","")), posix=True)
12029                         lexer.whitespace_split = True
12030                         rsync_opts.extend(lexer)
12031                         del lexer
12032
12033                         for opt in ("--recursive", "--times"):
12034                                 if opt not in rsync_opts:
12035                                         portage.writemsg(yellow("WARNING:") + " adding required option " + \
12036                                         "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
12037                                         rsync_opts.append(opt)
12038         
12039                         for exclude in ("distfiles", "local", "packages"):
12040                                 opt = "--exclude=/%s" % exclude
12041                                 if opt not in rsync_opts:
12042                                         portage.writemsg(yellow("WARNING:") + \
12043                                         " adding required option %s not included in "  % opt + \
12044                                         "PORTAGE_RSYNC_OPTS (can be overridden with --exclude='!')\n")
12045                                         rsync_opts.append(opt)
12046         
12047                         if syncuri.rstrip("/").endswith(".gentoo.org/gentoo-portage"):
12048                                 def rsync_opt_startswith(opt_prefix):
12049                                         for x in rsync_opts:
12050                                                 if x.startswith(opt_prefix):
12051                                                         return True
12052                                         return False
12053
12054                                 if not rsync_opt_startswith("--timeout="):
12055                                         rsync_opts.append("--timeout=%d" % mytimeout)
12056
12057                                 for opt in ("--compress", "--whole-file"):
12058                                         if opt not in rsync_opts:
12059                                                 portage.writemsg(yellow("WARNING:") + " adding required option " + \
12060                                                 "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
12061                                                 rsync_opts.append(opt)
12062
12063                 if "--quiet" in myopts:
12064                         rsync_opts.append("--quiet")    # Shut up a lot
12065                 else:
12066                         rsync_opts.append("--verbose")  # Print filelist
12067
12068                 if "--verbose" in myopts:
12069                         rsync_opts.append("--progress")  # Progress meter for each file
12070
12071                 if "--debug" in myopts:
12072                         rsync_opts.append("--checksum") # Force checksum on all files
12073
12074                 # Real local timestamp file.
12075                 servertimestampfile = os.path.join(
12076                         myportdir, "metadata", "timestamp.chk")
12077
12078                 content = portage.util.grabfile(servertimestampfile)
12079                 mytimestamp = 0
12080                 if content:
12081                         try:
12082                                 mytimestamp = time.mktime(time.strptime(content[0],
12083                                         "%a, %d %b %Y %H:%M:%S +0000"))
12084                         except (OverflowError, ValueError):
12085                                 pass
12086                 del content
12087
12088                 try:
12089                         rsync_initial_timeout = \
12090                                 int(settings.get("PORTAGE_RSYNC_INITIAL_TIMEOUT", "15"))
12091                 except ValueError:
12092                         rsync_initial_timeout = 15
12093
12094                 try:
12095                         maxretries=int(settings["PORTAGE_RSYNC_RETRIES"])
12096                 except SystemExit, e:
12097                         raise # Needed else can't exit
12098                 except:
12099                         maxretries=3 #default number of retries
12100
12101                 retries=0
12102                 user_name, hostname, port = re.split(
12103                         "rsync://([^:/]+@)?([^:/]*)(:[0-9]+)?", syncuri, maxsplit=3)[1:4]
12104                 if port is None:
12105                         port=""
12106                 if user_name is None:
12107                         user_name=""
12108                 updatecache_flg=True
12109                 all_rsync_opts = set(rsync_opts)
12110                 lexer = shlex.shlex(StringIO.StringIO(
12111                         settings.get("PORTAGE_RSYNC_EXTRA_OPTS","")), posix=True)
12112                 lexer.whitespace_split = True
12113                 extra_rsync_opts = list(lexer)
12114                 del lexer
12115                 all_rsync_opts.update(extra_rsync_opts)
12116                 family = socket.AF_INET
12117                 if "-4" in all_rsync_opts or "--ipv4" in all_rsync_opts:
12118                         family = socket.AF_INET
12119                 elif socket.has_ipv6 and \
12120                         ("-6" in all_rsync_opts or "--ipv6" in all_rsync_opts):
12121                         family = socket.AF_INET6
12122                 ips=[]
12123                 SERVER_OUT_OF_DATE = -1
12124                 EXCEEDED_MAX_RETRIES = -2
12125                 while (1):
12126                         if ips:
12127                                 del ips[0]
12128                         if ips==[]:
12129                                 try:
12130                                         for addrinfo in socket.getaddrinfo(
12131                                                 hostname, None, family, socket.SOCK_STREAM):
12132                                                 if socket.has_ipv6 and addrinfo[0] == socket.AF_INET6:
12133                                                         # IPv6 addresses need to be enclosed in square brackets
12134                                                         ips.append("[%s]" % addrinfo[4][0])
12135                                                 else:
12136                                                         ips.append(addrinfo[4][0])
12137                                         from random import shuffle
12138                                         shuffle(ips)
12139                                 except SystemExit, e:
12140                                         raise # Needed else can't exit
12141                                 except Exception, e:
12142                                         print "Notice:",str(e)
12143                                         dosyncuri=syncuri
12144
12145                         if ips:
12146                                 try:
12147                                         dosyncuri = syncuri.replace(
12148                                                 "//" + user_name + hostname + port + "/",
12149                                                 "//" + user_name + ips[0] + port + "/", 1)
12150                                 except SystemExit, e:
12151                                         raise # Needed else can't exit
12152                                 except Exception, e:
12153                                         print "Notice:",str(e)
12154                                         dosyncuri=syncuri
12155
12156                         if (retries==0):
12157                                 if "--ask" in myopts:
12158                                         if userquery("Do you want to sync your Portage tree with the mirror at\n" + blue(dosyncuri) + bold("?"))=="No":
12159                                                 print
12160                                                 print "Quitting."
12161                                                 print
12162                                                 sys.exit(0)
12163                                 emergelog(xterm_titles, ">>> Starting rsync with " + dosyncuri)
12164                                 if "--quiet" not in myopts:
12165                                         print ">>> Starting rsync with "+dosyncuri+"..."
12166                         else:
12167                                 emergelog(xterm_titles,
12168                                         ">>> Starting retry %d of %d with %s" % \
12169                                                 (retries,maxretries,dosyncuri))
12170                                 print "\n\n>>> Starting retry %d of %d with %s" % (retries,maxretries,dosyncuri)
12171
12172                         if mytimestamp != 0 and "--quiet" not in myopts:
12173                                 print ">>> Checking server timestamp ..."
12174
12175                         rsynccommand = ["/usr/bin/rsync"] + rsync_opts + extra_rsync_opts
12176
12177                         if "--debug" in myopts:
12178                                 print rsynccommand
12179
12180                         exitcode = os.EX_OK
12181                         servertimestamp = 0
12182                         # Even if there's no timestamp available locally, fetch the
12183                         # timestamp anyway as an initial probe to verify that the server is
12184                         # responsive.  This protects us from hanging indefinitely on a
12185                         # connection attempt to an unresponsive server which rsync's
12186                         # --timeout option does not prevent.
12187                         if True:
12188                                 # Temporary file for remote server timestamp comparison.
12189                                 from tempfile import mkstemp
12190                                 fd, tmpservertimestampfile = mkstemp()
12191                                 os.close(fd)
12192                                 mycommand = rsynccommand[:]
12193                                 mycommand.append(dosyncuri.rstrip("/") + \
12194                                         "/metadata/timestamp.chk")
12195                                 mycommand.append(tmpservertimestampfile)
12196                                 content = None
12197                                 mypids = []
12198                                 try:
12199                                         def timeout_handler(signum, frame):
12200                                                 raise portage.exception.PortageException("timed out")
12201                                         signal.signal(signal.SIGALRM, timeout_handler)
12202                                         # Timeout here in case the server is unresponsive.  The
12203                                         # --timeout rsync option doesn't apply to the initial
12204                                         # connection attempt.
12205                                         if rsync_initial_timeout:
12206                                                 signal.alarm(rsync_initial_timeout)
12207                                         try:
12208                                                 mypids.extend(portage.process.spawn(
12209                                                         mycommand, env=settings.environ(), returnpid=True))
12210                                                 exitcode = os.waitpid(mypids[0], 0)[1]
12211                                                 content = portage.grabfile(tmpservertimestampfile)
12212                                         finally:
12213                                                 if rsync_initial_timeout:
12214                                                         signal.alarm(0)
12215                                                 try:
12216                                                         os.unlink(tmpservertimestampfile)
12217                                                 except OSError:
12218                                                         pass
12219                                 except portage.exception.PortageException, e:
12220                                         # timed out
12221                                         print e
12222                                         del e
12223                                         if mypids and os.waitpid(mypids[0], os.WNOHANG) == (0,0):
12224                                                 os.kill(mypids[0], signal.SIGTERM)
12225                                                 os.waitpid(mypids[0], 0)
12226                                         # This is the same code rsync uses for timeout.
12227                                         exitcode = 30
12228                                 else:
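					# os.waitpid() returns a raw 16-bit status: the low byte
					# holds the terminating signal (if any) and the high byte
					# holds the normal exit code, so normalize it into a single
					# exit code here.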
12229                                         if exitcode != os.EX_OK:
12230                                                 if exitcode & 0xff:
12231                                                         exitcode = (exitcode & 0xff) << 8
12232                                                 else:
12233                                                         exitcode = exitcode >> 8
12234                                 if mypids:
12235                                         portage.process.spawned_pids.remove(mypids[0])
12236                                 if content:
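                                              # metadata/timestamp.chk holds a single line such as
                                              # "Sat, 05 Apr 2008 12:30:00 +0000"; convert it to epoch
                                              # seconds for comparison with the local tree's timestamp.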
12237                                         try:
12238                                                 servertimestamp = time.mktime(time.strptime(
12239                                                         content[0], "%a, %d %b %Y %H:%M:%S +0000"))
12240                                         except (OverflowError, ValueError):
12241                                                 pass
12242                                 del mycommand, mypids, content
12243                         if exitcode == os.EX_OK:
12244                                 if (servertimestamp != 0) and (servertimestamp == mytimestamp):
12245                                         emergelog(xterm_titles,
12246                                                 ">>> Cancelling sync -- Already current.")
12247                                         print
12248                                         print ">>>"
12249                                         print ">>> Timestamps on the server and in the local repository are the same."
12250                                         print ">>> Cancelling all further sync action. You are already up to date."
12251                                         print ">>>"
12252                                         print ">>> In order to force sync, remove '%s'." % servertimestampfile
12253                                         print ">>>"
12254                                         print
12255                                         sys.exit(0)
12256                                 elif (servertimestamp != 0) and (servertimestamp < mytimestamp):
12257                                         emergelog(xterm_titles,
12258                                                 ">>> Server out of date: %s" % dosyncuri)
12259                                         print
12260                                         print ">>>"
12261                                         print ">>> SERVER OUT OF DATE: %s" % dosyncuri
12262                                         print ">>>"
12263                                         print ">>> In order to force sync, remove '%s'." % servertimestampfile
12264                                         print ">>>"
12265                                         print
12266                                         exitcode = SERVER_OUT_OF_DATE
12267                                 elif (servertimestamp == 0) or (servertimestamp > mytimestamp):
12268                                         # actual sync
12269                                         mycommand = rsynccommand + [dosyncuri+"/", myportdir]
12270                                         exitcode = portage.process.spawn(mycommand, **spawn_kwargs)
12271                                         if exitcode in [0,1,3,4,11,14,20,21]:
12272                                                 break
12273                         elif exitcode in [1,3,4,11,14,20,21]:
12274                                 break
12275                         else:
12276                                 # Code 2 indicates protocol incompatibility, which is expected
12277                                 # for servers with protocol < 29 that don't support
12278                                 # --prune-empty-directories.  Retry for a server that supports
12279                                 # at least rsync protocol version 29 (>=rsync-2.6.4).
12280                                 pass
12281
12282                         retries=retries+1
12283
12284                         if retries<=maxretries:
12285                                 print ">>> Retrying..."
12286                                 time.sleep(11)
12287                         else:
12288                                 # over retries
12289                                 # exit loop
12290                                 updatecache_flg=False
12291                                 exitcode = EXCEEDED_MAX_RETRIES
12292                                 break
12293
12294                 if (exitcode==0):
12295                         emergelog(xterm_titles, "=== Sync completed with %s" % dosyncuri)
12296                 elif exitcode == SERVER_OUT_OF_DATE:
12297                         sys.exit(1)
12298                 elif exitcode == EXCEEDED_MAX_RETRIES:
12299                         sys.stderr.write(
12300                                 ">>> Exceeded PORTAGE_RSYNC_RETRIES: %s\n" % maxretries)
12301                         sys.exit(1)
12302                 elif (exitcode>0):
12303                         msg = []
12304                         if exitcode==1:
12305                                 msg.append("Rsync has reported a syntax error. Please ensure that")
12306                                 msg.append("your SYNC variable is set correctly.")
12307                                 msg.append("SYNC=" + settings["SYNC"])
12308                         elif exitcode==11:
12309                                 msg.append("Rsync has reported a file I/O error. Normally this means")
12310                                 msg.append("your disk is full, but it can also be caused by corruption")
12311                                 msg.append("on the filesystem that contains PORTDIR. Please investigate")
12312                                 msg.append("and try again after the problem has been fixed.")
12313                                 msg.append("PORTDIR=" + settings["PORTDIR"])
12314                         elif exitcode==20:
12315                                 msg.append("Rsync was killed before it finished.")
12316                         else:
12317                                 msg.append("Rsync has not successfully finished. It is recommended that you keep")
12318                                 msg.append("trying or that you use the 'emerge-webrsync' option if you are unable")
12319                                 msg.append("to use rsync due to firewall or other restrictions. This should be a")
12320                                 msg.append("temporary problem unless complications exist with your network")
12321                                 msg.append("(and possibly your system's filesystem) configuration.")
12322                         for line in msg:
12323                                 out.eerror(line)
12324                         sys.exit(exitcode)
12325         elif syncuri[:6]=="cvs://":
12326                 if not os.path.exists("/usr/bin/cvs"):
12327                         print "!!! /usr/bin/cvs does not exist, so CVS support is disabled."
12328                         print "!!! Type \"emerge dev-util/cvs\" to enable CVS support."
12329                         sys.exit(1)
12330                 cvsroot=syncuri[6:]
12331                 cvsdir=os.path.dirname(myportdir)
12332                 if not os.path.exists(myportdir+"/CVS"):
12333                         #initial checkout
12334                         print ">>> Starting initial cvs checkout with "+syncuri+"..."
12335                         if os.path.exists(cvsdir+"/gentoo-x86"):
12336                                 print "!!! existing",cvsdir+"/gentoo-x86 directory; exiting."
12337                                 sys.exit(1)
12338                         try:
12339                                 os.rmdir(myportdir)
12340                         except OSError, e:
12341                                 if e.errno != errno.ENOENT:
12342                                         sys.stderr.write(
12343                                                 "!!! existing '%s' directory; exiting.\n" % myportdir)
12344                                         sys.exit(1)
12345                                 del e
12346                         if portage.spawn("cd "+cvsdir+"; cvs -z0 -d "+cvsroot+" co -P gentoo-x86",settings,free=1):
12347                                 print "!!! cvs checkout error; exiting."
12348                                 sys.exit(1)
12349                         os.rename(os.path.join(cvsdir, "gentoo-x86"), myportdir)
12350                 else:
12351                         #cvs update
12352                         print ">>> Starting cvs update with "+syncuri+"..."
12353                         retval = portage.process.spawn_bash(
12354                                 "cd %s; cvs -z0 -q update -dP" % \
12355                                 (portage._shell_quote(myportdir),), **spawn_kwargs)
12356                         if retval != os.EX_OK:
12357                                 sys.exit(retval)
12358                 dosyncuri = syncuri
12359         else:
12360                 writemsg_level("!!! Unrecognized protocol: SYNC='%s'\n" % (syncuri,),
12361                         noiselevel=-1, level=logging.ERROR)
12362                 return 1
12363
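              # Only carry the cache update forward when FEATURES=metadata-transfer
              # is enabled or this is an explicit "emerge --metadata" run.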
12364         if updatecache_flg and  \
12365                 myaction != "metadata" and \
12366                 "metadata-transfer" not in settings.features:
12367                 updatecache_flg = False
12368
12369         # Reload the whole config from scratch.
12370         settings, trees, mtimedb = load_emerge_config(trees=trees)
12371         root_config = trees[settings["ROOT"]]["root_config"]
12372         portdb = trees[settings["ROOT"]]["porttree"].dbapi
12373
12374         if os.path.exists(myportdir+"/metadata/cache") and updatecache_flg:
12375                 action_metadata(settings, portdb, myopts)
12376
12377         if portage._global_updates(trees, mtimedb["updates"]):
12378                 mtimedb.commit()
12379                 # Reload the whole config from scratch.
12380                 settings, trees, mtimedb = load_emerge_config(trees=trees)
12381                 portdb = trees[settings["ROOT"]]["porttree"].dbapi
12382                 root_config = trees[settings["ROOT"]]["root_config"]
12383
12384         mybestpv = portdb.xmatch("bestmatch-visible",
12385                 portage.const.PORTAGE_PACKAGE_ATOM)
12386         mypvs = portage.best(
12387                 trees[settings["ROOT"]]["vartree"].dbapi.match(
12388                 portage.const.PORTAGE_PACKAGE_ATOM))
12389
12390         chk_updated_cfg_files("/", settings.get("CONFIG_PROTECT","").split())
12391
12392         if myaction != "metadata":
12393                 if os.access(portage.USER_CONFIG_PATH + "/bin/post_sync", os.X_OK):
12394                         retval = portage.process.spawn(
12395                                 [os.path.join(portage.USER_CONFIG_PATH, "bin", "post_sync"),
12396                                 dosyncuri], env=settings.environ())
12397                         if retval != os.EX_OK:
12398                                 print red(" * ")+bold("spawn failed for " + portage.USER_CONFIG_PATH + "/bin/post_sync")
12399
12400         if (mybestpv != mypvs) and "--quiet" not in myopts:
12401                 print
12402                 print red(" * ")+bold("An update to portage is available.")+" It is _highly_ recommended"
12403                 print red(" * ")+"that you update portage now, before any other packages are updated."
12404                 print
12405                 print red(" * ")+"To update portage, run 'emerge portage' now."
12406                 print
12407         
12408         display_news_notification(root_config, myopts)
12409         return os.EX_OK
12410
12411 def git_sync_timestamps(settings, portdir):
12412         """
12413         Since git doesn't preserve timestamps, synchronize timestamps between
12414         cache entries and their ebuilds/eclasses. Assume the cache has the correct timestamp
12415         for a given file as long as the file in the working tree is not modified
12416         (relative to HEAD).
12417         """
12418         cache_dir = os.path.join(portdir, "metadata", "cache")
12419         if not os.path.isdir(cache_dir):
12420                 return os.EX_OK
12421         writemsg_level(">>> Synchronizing timestamps...\n")
12422
12423         from portage.cache.cache_errors import CacheError
12424         try:
12425                 cache_db = settings.load_best_module("portdbapi.metadbmodule")(
12426                         portdir, "metadata/cache", portage.auxdbkeys[:], readonly=True)
12427         except CacheError, e:
12428                 writemsg_level("!!! Unable to instantiate cache: %s\n" % (e,),
12429                         level=logging.ERROR, noiselevel=-1)
12430                 return 1
12431
12432         ec_dir = os.path.join(portdir, "eclass")
12433         try:
12434                 ec_names = set(f[:-7] for f in os.listdir(ec_dir) \
12435                         if f.endswith(".eclass"))
12436         except OSError, e:
12437                 writemsg_level("!!! Unable to list eclasses: %s\n" % (e,),
12438                         level=logging.ERROR, noiselevel=-1)
12439                 return 1
12440
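              # Ask git which tracked files have been modified relative to HEAD;
              # cache timestamps can't be trusted for those files, so their
              # entries are skipped below.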
12441         args = [portage.const.BASH_BINARY, "-c",
12442                 "cd %s && git diff-index --name-only --diff-filter=M HEAD" % \
12443                 portage._shell_quote(portdir)]
12444         import subprocess
12445         proc = subprocess.Popen(args, stdout=subprocess.PIPE)
12446         modified_files = set(l.rstrip("\n") for l in proc.stdout)
12447         rval = proc.wait()
12448         if rval != os.EX_OK:
12449                 return rval
12450
12451         modified_eclasses = set(ec for ec in ec_names \
12452                 if os.path.join("eclass", ec + ".eclass") in modified_files)
12453
12454         updated_ec_mtimes = {}
12455
12456         for cpv in cache_db:
12457                 cpv_split = portage.catpkgsplit(cpv)
12458                 if cpv_split is None:
12459                         writemsg_level("!!! Invalid cache entry: %s\n" % (cpv,),
12460                                 level=logging.ERROR, noiselevel=-1)
12461                         continue
12462
12463                 cat, pn, ver, rev = cpv_split
12464                 cat, pf = portage.catsplit(cpv)
12465                 relative_eb_path = os.path.join(cat, pn, pf + ".ebuild")
12466                 if relative_eb_path in modified_files:
12467                         continue
12468
12469                 try:
12470                         cache_entry = cache_db[cpv]
12471                         eb_mtime = cache_entry.get("_mtime_")
12472                         ec_mtimes = cache_entry.get("_eclasses_")
12473                 except KeyError:
12474                         writemsg_level("!!! Missing cache entry: %s\n" % (cpv,),
12475                                 level=logging.ERROR, noiselevel=-1)
12476                         continue
12477                 except CacheError, e:
12478                         writemsg_level("!!! Unable to access cache entry: %s %s\n" % \
12479                                 (cpv, e), level=logging.ERROR, noiselevel=-1)
12480                         continue
12481
12482                 if eb_mtime is None:
12483                         writemsg_level("!!! Missing ebuild mtime: %s\n" % (cpv,),
12484                                 level=logging.ERROR, noiselevel=-1)
12485                         continue
12486
12487                 try:
12488                         eb_mtime = long(eb_mtime)
12489                 except ValueError:
12490                         writemsg_level("!!! Invalid ebuild mtime: %s %s\n" % \
12491                                 (cpv, eb_mtime), level=logging.ERROR, noiselevel=-1)
12492                         continue
12493
12494                 if ec_mtimes is None:
12495                         writemsg_level("!!! Missing eclass mtimes: %s\n" % (cpv,),
12496                                 level=logging.ERROR, noiselevel=-1)
12497                         continue
12498
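                      # If any eclass this entry inherits has local modifications, the
                      # recorded eclass mtimes are no longer meaningful, so skip it.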
12499                 if modified_eclasses.intersection(ec_mtimes):
12500                         continue
12501
12502                 missing_eclasses = set(ec_mtimes).difference(ec_names)
12503                 if missing_eclasses:
12504                         writemsg_level("!!! Non-existent eclass(es): %s %s\n" % \
12505                                 (cpv, sorted(missing_eclasses)), level=logging.ERROR,
12506                                 noiselevel=-1)
12507                         continue
12508
12509                 eb_path = os.path.join(portdir, relative_eb_path)
12510                 try:
12511                         current_eb_mtime = long(os.stat(eb_path).st_mtime)
12512                 except OSError:
12513                         writemsg_level("!!! Missing ebuild: %s\n" % \
12514                                 (cpv,), level=logging.ERROR, noiselevel=-1)
12515                         continue
12516
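                      # Once an eclass has been synced to one mtime, a cache entry that
                      # expects a different mtime for the same eclass can't be satisfied.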
12517                 inconsistent = False
12518                 for ec, (ec_path, ec_mtime) in ec_mtimes.iteritems():
12519                         updated_mtime = updated_ec_mtimes.get(ec)
12520                         if updated_mtime is not None and updated_mtime != ec_mtime:
12521                                 writemsg_level("!!! Inconsistent eclass mtime: %s %s\n" % \
12522                                         (cpv, ec), level=logging.ERROR, noiselevel=-1)
12523                                 inconsistent = True
12524                                 break
12525
12526                 if inconsistent:
12527                         continue
12528
12529                 if current_eb_mtime != eb_mtime:
12530                         os.utime(eb_path, (eb_mtime, eb_mtime))
12531
12532                 for ec, (ec_path, ec_mtime) in ec_mtimes.iteritems():
12533                         if ec in updated_ec_mtimes:
12534                                 continue
12535                         ec_path = os.path.join(ec_dir, ec + ".eclass")
12536                         current_mtime = long(os.stat(ec_path).st_mtime)
12537                         if current_mtime != ec_mtime:
12538                                 os.utime(ec_path, (ec_mtime, ec_mtime))
12539                         updated_ec_mtimes[ec] = ec_mtime
12540
12541         return os.EX_OK
12542
12543 def action_metadata(settings, portdb, myopts):
12544         portage.writemsg_stdout("\n>>> Updating Portage cache:      ")
12545         old_umask = os.umask(0002)
12546         cachedir = os.path.normpath(settings.depcachedir)
12547         if cachedir in ["/",    "/bin", "/dev",  "/etc",  "/home",
12548                                         "/lib", "/opt", "/proc", "/root", "/sbin",
12549                                         "/sys", "/tmp", "/usr",  "/var"]:
12550                 print >> sys.stderr, "!!! PORTAGE_DEPCACHEDIR IS SET TO A PRIMARY " + \
12551                         "ROOT DIRECTORY ON YOUR SYSTEM."
12552                 print >> sys.stderr, \
12553                         "!!! This is ALMOST CERTAINLY NOT what you want: '%s'" % cachedir
12554                 sys.exit(73)
12555         if not os.path.exists(cachedir):
12556                 os.mkdir(cachedir)
12557
12558         ec = portage.eclass_cache.cache(portdb.porttree_root)
12559         myportdir = os.path.realpath(settings["PORTDIR"])
12560         cm = settings.load_best_module("portdbapi.metadbmodule")(
12561                 myportdir, "metadata/cache", portage.auxdbkeys[:], readonly=True)
12562
12563         from portage.cache import util
12564
12565         class percentage_noise_maker(util.quiet_mirroring):
12566                 def __init__(self, dbapi):
12567                         self.dbapi = dbapi
12568                         self.cp_all = dbapi.cp_all()
12569                         l = len(self.cp_all)
12570                         self.call_update_min = 100000000
12571                         self.min_cp_all = l/100.0
12572                         self.count = 1
12573                         self.pstr = ''
12574
12575                 def __iter__(self):
12576                         for x in self.cp_all:
12577                                 self.count += 1
12578                                 if self.count > self.min_cp_all:
12579                                         self.call_update_min = 0
12580                                         self.count = 0
12581                                 for y in self.dbapi.cp_list(x):
12582                                         yield y
12583                         self.call_update_min = 0
12584
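                      # Progress callback used while the cache is mirrored: it rewrites
                      # the percentage in place by backspacing over the previous value.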
12585                 def update(self, *arg):
12586                         try: self.pstr = int(self.pstr) + 1
12587                         except ValueError: self.pstr = 1
12588                         sys.stdout.write("%s%i%%" % \
12589                                 ("\b" * (len(str(self.pstr))+1), self.pstr))
12590                         sys.stdout.flush()
12591                         self.call_update_min = 10000000
12592
12593                 def finish(self, *arg):
12594                         sys.stdout.write("\b\b\b\b100%\n")
12595                         sys.stdout.flush()
12596
12597         if "--quiet" in myopts:
12598                 def quicky_cpv_generator(cp_all_list):
12599                         for x in cp_all_list:
12600                                 for y in portdb.cp_list(x):
12601                                         yield y
12602                 source = quicky_cpv_generator(portdb.cp_all())
12603                 noise_maker = portage.cache.util.quiet_mirroring()
12604         else:
12605                 noise_maker = source = percentage_noise_maker(portdb)
12606         portage.cache.util.mirror_cache(source, cm, portdb.auxdb[myportdir],
12607                 eclass_cache=ec, verbose_instance=noise_maker)
12608
12609         sys.stdout.flush()
12610         os.umask(old_umask)
12611
12612 def action_regen(settings, portdb, max_jobs, max_load):
12613         xterm_titles = "notitles" not in settings.features
12614         emergelog(xterm_titles, " === regen")
12615         #regenerate cache entries
12616         portage.writemsg_stdout("Regenerating cache entries...\n")
12617         try:
12618                 os.close(sys.stdin.fileno())
12619         except SystemExit, e:
12620                 raise # Needed else can't exit
12621         except:
12622                 pass
12623         sys.stdout.flush()
12624
12625         regen = MetadataRegen(portdb, max_jobs=max_jobs, max_load=max_load)
12626         regen.run()
12627
12628         portage.writemsg_stdout("done!\n")
12629
12630 def action_config(settings, trees, myopts, myfiles):
12631         if len(myfiles) != 1:
12632                 print red("!!! config can only take a single package atom at this time\n")
12633                 sys.exit(1)
12634         if not is_valid_package_atom(myfiles[0]):
12635                 portage.writemsg("!!! '%s' is not a valid package atom.\n" % myfiles[0],
12636                         noiselevel=-1)
12637                 portage.writemsg("!!! Please check ebuild(5) for full details.\n")
12638                 portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
12639                 sys.exit(1)
12640         print
12641         try:
12642                 pkgs = trees[settings["ROOT"]]["vartree"].dbapi.match(myfiles[0])
12643         except portage.exception.AmbiguousPackageName, e:
12644                 # Multiple matches thrown from cpv_expand
12645                 pkgs = e.args[0]
12646         if len(pkgs) == 0:
12647                 print "No packages found.\n"
12648                 sys.exit(0)
12649         elif len(pkgs) > 1:
12650                 if "--ask" in myopts:
12651                         options = []
12652                         print "Please select a package to configure:"
12653                         idx = 0
12654                         for pkg in pkgs:
12655                                 idx += 1
12656                                 options.append(str(idx))
12657                                 print options[-1]+") "+pkg
12658                         print "X) Cancel"
12659                         options.append("X")
12660                         idx = userquery("Selection?", options)
12661                         if idx == "X":
12662                                 sys.exit(0)
12663                         pkg = pkgs[int(idx)-1]
12664                 else:
12665                         print "The following packages are available:"
12666                         for pkg in pkgs:
12667                                 print "* "+pkg
12668                         print "\nPlease use a specific atom or the --ask option."
12669                         sys.exit(1)
12670         else:
12671                 pkg = pkgs[0]
12672
12673         print
12674         if "--ask" in myopts:
12675                 if userquery("Ready to configure "+pkg+"?") == "No":
12676                         sys.exit(0)
12677         else:
12678                 print "Configuring %s..." % pkg
12679         print
12680         ebuildpath = trees[settings["ROOT"]]["vartree"].dbapi.findname(pkg)
12681         mysettings = portage.config(clone=settings)
12682         vardb = trees[mysettings["ROOT"]]["vartree"].dbapi
12683         debug = mysettings.get("PORTAGE_DEBUG") == "1"
12684         retval = portage.doebuild(ebuildpath, "config", mysettings["ROOT"],
12685                 mysettings,
12686                 debug=debug, cleanup=True,
12687                 mydbapi=trees[settings["ROOT"]]["vartree"].dbapi, tree="vartree")
12688         if retval == os.EX_OK:
12689                 portage.doebuild(ebuildpath, "clean", mysettings["ROOT"],
12690                         mysettings, debug=debug, mydbapi=vardb, tree="vartree")
12691         print
12692
12693 def action_info(settings, trees, myopts, myfiles):
12694         print getportageversion(settings["PORTDIR"], settings["ROOT"],
12695                 settings.profile_path, settings["CHOST"],
12696                 trees[settings["ROOT"]]["vartree"].dbapi)
12697         header_width = 65
12698         header_title = "System Settings"
12699         if myfiles:
12700                 print header_width * "="
12701                 print header_title.rjust(int(header_width/2 + len(header_title)/2))
12702         print header_width * "="
12703         print "System uname: "+platform.platform(aliased=1)
12704
12705         lastSync = portage.grabfile(os.path.join(
12706                 settings["PORTDIR"], "metadata", "timestamp.chk"))
12707         print "Timestamp of tree:",
12708         if lastSync:
12709                 print lastSync[0]
12710         else:
12711                 print "Unknown"
12712
12713         output=commands.getstatusoutput("distcc --version")
12714         if not output[0]:
12715                 print str(output[1].split("\n",1)[0]),
12716                 if "distcc" in settings.features:
12717                         print "[enabled]"
12718                 else:
12719                         print "[disabled]"
12720
12721         output=commands.getstatusoutput("ccache -V")
12722         if not output[0]:
12723                 print str(output[1].split("\n",1)[0]),
12724                 if "ccache" in settings.features:
12725                         print "[enabled]"
12726                 else:
12727                         print "[disabled]"
12728
12729         myvars  = ["sys-devel/autoconf", "sys-devel/automake", "virtual/os-headers",
12730                    "sys-devel/binutils", "sys-devel/libtool",  "dev-lang/python"]
12731         myvars += portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_pkgs")
12732         myvars  = portage.util.unique_array(myvars)
12733         myvars.sort()
12734
12735         for x in myvars:
12736                 if portage.isvalidatom(x):
12737                         pkg_matches = trees["/"]["vartree"].dbapi.match(x)
12738                         pkg_matches = [portage.catpkgsplit(cpv)[1:] for cpv in pkg_matches]
12739                         pkg_matches.sort(portage.pkgcmp)
12740                         pkgs = []
12741                         for pn, ver, rev in pkg_matches:
12742                                 if rev != "r0":
12743                                         pkgs.append(ver + "-" + rev)
12744                                 else:
12745                                         pkgs.append(ver)
12746                         if pkgs:
12747                                 pkgs = ", ".join(pkgs)
12748                                 print "%-20s %s" % (x+":", pkgs)
12749                 else:
12750                         print "%-20s %s" % (x+":", "[NOT VALID]")
12751
12752         libtool_vers = ",".join(trees["/"]["vartree"].dbapi.match("sys-devel/libtool"))
12753
12754         if "--verbose" in myopts:
12755                 myvars=settings.keys()
12756         else:
12757                 myvars = ['GENTOO_MIRRORS', 'CONFIG_PROTECT', 'CONFIG_PROTECT_MASK',
12758                           'PORTDIR', 'DISTDIR', 'PKGDIR', 'PORTAGE_TMPDIR',
12759                           'PORTDIR_OVERLAY', 'USE', 'CHOST', 'CFLAGS', 'CXXFLAGS',
12760                           'ACCEPT_KEYWORDS', 'SYNC', 'FEATURES', 'EMERGE_DEFAULT_OPTS']
12761
12762                 myvars.extend(portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_vars"))
12763
12764         myvars = portage.util.unique_array(myvars)
12765         unset_vars = []
12766         myvars.sort()
12767         for x in myvars:
12768                 if x in settings:
12769                         if x != "USE":
12770                                 print '%s="%s"' % (x, settings[x])
12771                         else:
12772                                 use = set(settings["USE"].split())
12773                                 use_expand = settings["USE_EXPAND"].split()
12774                                 use_expand.sort()
12775                                 for varname in use_expand:
12776                                         flag_prefix = varname.lower() + "_"
12777                                         for f in list(use):
12778                                                 if f.startswith(flag_prefix):
12779                                                         use.remove(f)
12780                                 use = list(use)
12781                                 use.sort()
12782                                 print 'USE="%s"' % " ".join(use),
12783                                 for varname in use_expand:
12784                                         myval = settings.get(varname)
12785                                         if myval:
12786                                                 print '%s="%s"' % (varname, myval),
12787                                 print
12788                 else:
12789                         unset_vars.append(x)
12790         if unset_vars:
12791                 print "Unset:  "+", ".join(unset_vars)
12792         print
12793
12794         if "--debug" in myopts:
12795                 for x in dir(portage):
12796                         module = getattr(portage, x)
12797                         if "cvs_id_string" in dir(module):
12798                                 print "%s: %s" % (str(x), str(module.cvs_id_string))
12799
12800         # See if we can find any packages installed matching the strings
12801         # passed on the command line
12802         mypkgs = []
12803         vardb = trees[settings["ROOT"]]["vartree"].dbapi
12804         portdb = trees[settings["ROOT"]]["porttree"].dbapi
12805         for x in myfiles:
12806                 mypkgs.extend(vardb.match(x))
12807
12808         # If some packages were found...
12809         if mypkgs:
12810                 # Get our global settings (we only print stuff if it varies from
12811                 # the current config)
12812                 mydesiredvars = [ 'CHOST', 'CFLAGS', 'CXXFLAGS', 'LDFLAGS' ]
12813                 auxkeys = mydesiredvars + [ "USE", "IUSE"]
12814                 global_vals = {}
12815                 pkgsettings = portage.config(clone=settings)
12816
12817                 for myvar in mydesiredvars:
12818                         global_vals[myvar] = set(settings.get(myvar, "").split())
12819
12820                 # Loop through each package
12821                 # Only print settings if they differ from global settings
12822                 header_title = "Package Settings"
12823                 print header_width * "="
12824                 print header_title.rjust(int(header_width/2 + len(header_title)/2))
12825                 print header_width * "="
12826                 from portage.output import EOutput
12827                 out = EOutput()
12828                 for pkg in mypkgs:
12829                         # Get all package specific variables
12830                         auxvalues = vardb.aux_get(pkg, auxkeys)
12831                         valuesmap = {}
12832                         for i in xrange(len(auxkeys)):
12833                                 valuesmap[auxkeys[i]] = set(auxvalues[i].split())
12834                         diff_values = {}
12835                         for myvar in mydesiredvars:
12836                                 # If the package variable doesn't match the
12837                                 # current global variable, something has changed
12838                                 # so set diff_found so we know to print
12839                                 if valuesmap[myvar] != global_vals[myvar]:
12840                                         diff_values[myvar] = valuesmap[myvar]
12841                         valuesmap["IUSE"] = set(filter_iuse_defaults(valuesmap["IUSE"]))
12842                         valuesmap["USE"] = valuesmap["USE"].intersection(valuesmap["IUSE"])
12843                         pkgsettings.reset()
12844                         # If a matching ebuild is no longer available in the tree, maybe it
12845                         # would make sense to compare against the flags for the best
12846                         # available version with the same slot?
12847                         mydb = None
12848                         if portdb.cpv_exists(pkg):
12849                                 mydb = portdb
12850                         pkgsettings.setcpv(pkg, mydb=mydb)
12851                         if valuesmap["IUSE"].intersection(
12852                                 pkgsettings["PORTAGE_USE"].split()) != valuesmap["USE"]:
12853                                 diff_values["USE"] = valuesmap["USE"]
12854                         # If a difference was found, print the info for
12855                         # this package.
12856                         if diff_values:
12857                                 # Print package info
12858                                 print "%s was built with the following:" % pkg
12859                                 for myvar in mydesiredvars + ["USE"]:
12860                                         if myvar in diff_values:
12861                                                 mylist = list(diff_values[myvar])
12862                                                 mylist.sort()
12863                                                 print "%s=\"%s\"" % (myvar, " ".join(mylist))
12864                                 print
12865                         print ">>> Attempting to run pkg_info() for '%s'" % pkg
12866                         ebuildpath = vardb.findname(pkg)
12867                         if not ebuildpath or not os.path.exists(ebuildpath):
12868                                 out.ewarn("No ebuild found for '%s'" % pkg)
12869                                 continue
12870                         portage.doebuild(ebuildpath, "info", pkgsettings["ROOT"],
12871                                 pkgsettings, debug=(settings.get("PORTAGE_DEBUG", "") == "1"),
12872                                 mydbapi=trees[settings["ROOT"]]["vartree"].dbapi,
12873                                 tree="vartree")
12874
12875 def action_search(root_config, myopts, myfiles, spinner):
12876         if not myfiles:
12877                 print "emerge: no search terms provided."
12878         else:
12879                 searchinstance = search(root_config,
12880                         spinner, "--searchdesc" in myopts,
12881                         "--quiet" not in myopts, "--usepkg" in myopts,
12882                         "--usepkgonly" in myopts)
12883                 for mysearch in myfiles:
12884                         try:
12885                                 searchinstance.execute(mysearch)
12886                         except re.error, comment:
12887                                 print "\n!!! Regular expression error in \"%s\": %s" % ( mysearch, comment )
12888                                 sys.exit(1)
12889                         searchinstance.output()
12890
12891 def action_depclean(settings, trees, ldpath_mtimes,
12892         myopts, action, myfiles, spinner):
12893         # Remove packages that are neither explicitly merged nor required as a
12894         # dependency of another package.  The world file defines the explicit set.
12895
12896         # Global depclean or prune operations are not very safe when there are
12897         # missing dependencies since it's unknown how badly incomplete
12898         # the dependency graph is, and we might accidentally remove packages
12899         # that should have been pulled into the graph. On the other hand, it's
12900         # relatively safe to ignore missing deps when only asked to remove
12901         # specific packages.
12902         allow_missing_deps = len(myfiles) > 0
12903
12904         msg = []
12905         msg.append("Always study the list of packages to be cleaned for any obvious\n")
12906         msg.append("mistakes. Packages that are part of the world set will always\n")
12907         msg.append("be kept.  They can be manually added to this set with\n")
12908         msg.append(good("`emerge --noreplace <atom>`") + ".  Packages that are listed in\n")
12909         msg.append("package.provided (see portage(5)) will be removed by\n")
12910         msg.append("depclean, even if they are part of the world set.\n")
12911         msg.append("\n")
12912         msg.append("As a safety measure, depclean will not remove any packages\n")
12913         msg.append("unless *all* required dependencies have been resolved.  As a\n")
12914         msg.append("consequence, it is often necessary to run %s\n" % \
12915                 good("`emerge --update"))
12916         msg.append(good("--newuse --deep @system @world`") + \
12917                 " prior to depclean.\n")
12918
12919         if action == "depclean" and "--quiet" not in myopts and not myfiles:
12920                 portage.writemsg_stdout("\n")
12921                 for x in msg:
12922                         portage.writemsg_stdout(colorize("WARN", " * ") + x)
12923
12924         xterm_titles = "notitles" not in settings.features
12925         myroot = settings["ROOT"]
12926         root_config = trees[myroot]["root_config"]
12927         getSetAtoms = root_config.setconfig.getSetAtoms
12928         vardb = trees[myroot]["vartree"].dbapi
12929
12930         required_set_names = ("system", "world")
12931         required_sets = {}
12932         set_args = []
12933
12934         for s in required_set_names:
12935                 required_sets[s] = InternalPackageSet(
12936                         initial_atoms=getSetAtoms(s))
12937
12938         
12939         # When removing packages, use a temporary version of world
12940         # which excludes packages that are intended to be eligible for
12941         # removal.
12942         world_temp_set = required_sets["world"]
12943         system_set = required_sets["system"]
12944
12945         if not system_set or not world_temp_set:
12946
12947                 if not system_set:
12948                         writemsg_level("!!! You have no system list.\n",
12949                                 level=logging.ERROR, noiselevel=-1)
12950
12951                 if not world_temp_set:
12952                         writemsg_level("!!! You have no world file.\n",
12953                                         level=logging.WARNING, noiselevel=-1)
12954
12955                 writemsg_level("!!! Proceeding is likely to " + \
12956                         "break your installation.\n",
12957                         level=logging.WARNING, noiselevel=-1)
12958                 if "--pretend" not in myopts:
12959                         countdown(int(settings["EMERGE_WARNING_DELAY"]), ">>> Depclean")
12960
12961         if action == "depclean":
12962                 emergelog(xterm_titles, " >>> depclean")
12963
12964         import textwrap
12965         args_set = InternalPackageSet()
12966         if myfiles:
12967                 for x in myfiles:
12968                         if not is_valid_package_atom(x):
12969                                 writemsg_level("!!! '%s' is not a valid package atom.\n" % x,
12970                                         level=logging.ERROR, noiselevel=-1)
12971                                 writemsg_level("!!! Please check ebuild(5) for full details.\n", level=logging.ERROR, noiselevel=-1)
12972                                 return
12973                         try:
12974                                 atom = portage.dep_expand(x, mydb=vardb, settings=settings)
12975                         except portage.exception.AmbiguousPackageName, e:
12976                                 msg = "The short ebuild name \"" + x + \
12977                                         "\" is ambiguous.  Please specify " + \
12978                                         "one of the following " + \
12979                                         "fully-qualified ebuild names instead:"
12980                                 for line in textwrap.wrap(msg, 70):
12981                                         writemsg_level("!!! %s\n" % (line,),
12982                                                 level=logging.ERROR, noiselevel=-1)
12983                                 for i in e[0]:
12984                                         writemsg_level("    %s\n" % colorize("INFORM", i),
12985                                                 level=logging.ERROR, noiselevel=-1)
12986                                 writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
12987                                 return
12988                         args_set.add(atom)
12989                 matched_packages = False
12990                 for x in args_set:
12991                         if vardb.match(x):
12992                                 matched_packages = True
12993                                 break
12994                 if not matched_packages:
12995                         writemsg_level(">>> No packages selected for removal by %s\n" % \
12996                                 action)
12997                         return
12998
12999         writemsg_level("\nCalculating dependencies  ")
13000         resolver_params = create_depgraph_params(myopts, "remove")
13001         resolver = depgraph(settings, trees, myopts, resolver_params, spinner)
13002         vardb = resolver.trees[myroot]["vartree"].dbapi
13003
13004         if action == "depclean":
13005
13006                 if args_set:
13007                         # Pull in everything that's installed but not matched
13008                         # by an argument atom since we don't want to clean any
13009                         # package if something depends on it.
13010
13011                         world_temp_set.clear()
13012                         for pkg in vardb:
13013                                 spinner.update()
13014
13015                                 try:
13016                                         if args_set.findAtomForPackage(pkg) is None:
13017                                                 world_temp_set.add("=" + pkg.cpv)
13018                                                 continue
13019                                 except portage.exception.InvalidDependString, e:
13020                                         show_invalid_depstring_notice(pkg,
13021                                                 pkg.metadata["PROVIDE"], str(e))
13022                                         del e
13023                                         world_temp_set.add("=" + pkg.cpv)
13024                                         continue
13025
13026         elif action == "prune":
13027
13028                 # Pull in everything that's installed since we don't want
13029                 # to prune a package if something depends on it.
13030                 world_temp_set.clear()
13031                 world_temp_set.update(vardb.cp_all())
13032
13033                 if not args_set:
13034
13035                         # Try to prune everything that's slotted.
13036                         for cp in vardb.cp_all():
13037                                 if len(vardb.cp_list(cp)) > 1:
13038                                         args_set.add(cp)
13039
13040                 # Remove atoms from world that match installed packages
13041                 # that are also matched by argument atoms, but do not remove
13042                 # them if they match the highest installed version.
13043                 for pkg in vardb:
13044                         spinner.update()
13045                         pkgs_for_cp = vardb.match_pkgs(pkg.cp)
13046                         if not pkgs_for_cp or pkg not in pkgs_for_cp:
13047                                 raise AssertionError("package expected in matches: " + \
13048                                         "cp = %s, cpv = %s matches = %s" % \
13049                                         (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
13050
13051                         highest_version = pkgs_for_cp[-1]
13052                         if pkg == highest_version:
13053                                 # pkg is the highest version
13054                                 world_temp_set.add("=" + pkg.cpv)
13055                                 continue
13056
13057                         if len(pkgs_for_cp) <= 1:
13058                                 raise AssertionError("more packages expected: " + \
13059                                         "cp = %s, cpv = %s matches = %s" % \
13060                                         (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
13061
13062                         try:
13063                                 if args_set.findAtomForPackage(pkg) is None:
13064                                         world_temp_set.add("=" + pkg.cpv)
13065                                         continue
13066                         except portage.exception.InvalidDependString, e:
13067                                 show_invalid_depstring_notice(pkg,
13068                                         pkg.metadata["PROVIDE"], str(e))
13069                                 del e
13070                                 world_temp_set.add("=" + pkg.cpv)
13071                                 continue
13072
13073         set_args = {}
13074         for s, package_set in required_sets.iteritems():
13075                 set_atom = SETPREFIX + s
13076                 set_arg = SetArg(arg=set_atom, set=package_set,
13077                         root_config=resolver.roots[myroot])
13078                 set_args[s] = set_arg
13079                 for atom in set_arg.set:
13080                         resolver._dep_stack.append(
13081                                 Dependency(atom=atom, root=myroot, parent=set_arg))
13082                         resolver.digraph.add(set_arg, None)
13083
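              # With the system and world sets seeded as graph roots, completing the
              # graph pulls in the dependencies of everything that must be kept.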
13084         success = resolver._complete_graph()
13085         writemsg_level("\b\b... done!\n")
13086
13087         resolver.display_problems()
13088
13089         if not success:
13090                 return 1
13091
13092         def unresolved_deps():
13093
13094                 unresolvable = set()
13095                 for dep in resolver._initially_unsatisfied_deps:
13096                         if isinstance(dep.parent, Package) and \
13097                                 (dep.priority > UnmergeDepPriority.SOFT):
13098                                 unresolvable.add((dep.atom, dep.parent.cpv))
13099
13100                 if not unresolvable:
13101                         return False
13102
13103                 if unresolvable and not allow_missing_deps:
13104                         prefix = bad(" * ")
13105                         msg = []
13106                         msg.append("Dependencies could not be completely resolved due to")
13107                         msg.append("the following required packages not being installed:")
13108                         msg.append("")
13109                         for atom, parent in unresolvable:
13110                                 msg.append("  %s pulled in by:" % (atom,))
13111                                 msg.append("    %s" % (parent,))
13112                                 msg.append("")
13113                         msg.append("Have you forgotten to run " + \
13114                                 good("`emerge --update --newuse --deep @system @world`") + " prior")
13115                         msg.append(("to %s? It may be necessary to manually " + \
13116                                 "uninstall packages that no longer") % action)
13117                         msg.append("exist in the portage tree since " + \
13118                                 "it may not be possible to satisfy their")
13119                         msg.append("dependencies.  Also, be aware of " + \
13120                                 "the --with-bdeps option that is documented")
13121                         msg.append("in " + good("`man emerge`") + ".")
13122                         if action == "prune":
13123                                 msg.append("")
13124                                 msg.append("If you would like to ignore " + \
13125                                         "dependencies then use %s." % good("--nodeps"))
13126                         writemsg_level("".join("%s%s\n" % (prefix, line) for line in msg),
13127                                 level=logging.ERROR, noiselevel=-1)
13128                         return True
13129                 return False
13130
13131         if unresolved_deps():
13132                 return 1
13133
13134         graph = resolver.digraph.copy()
13135         required_pkgs_total = 0
13136         for node in graph:
13137                 if isinstance(node, Package):
13138                         required_pkgs_total += 1
13139
13140         def show_parents(child_node):
13141                 parent_nodes = graph.parent_nodes(child_node)
13142                 if not parent_nodes:
13143                         # With --prune, the highest version can be pulled in without any
13144                         # real parent since all installed packages are pulled in.  In that
13145                         # case there's nothing to show here.
13146                         return
13147                 parent_strs = []
13148                 for node in parent_nodes:
13149                         parent_strs.append(str(getattr(node, "cpv", node)))
13150                 parent_strs.sort()
13151                 msg = []
13152                 msg.append("  %s pulled in by:\n" % (child_node.cpv,))
13153                 for parent_str in parent_strs:
13154                         msg.append("    %s\n" % (parent_str,))
13155                 msg.append("\n")
13156                 portage.writemsg_stdout("".join(msg), noiselevel=-1)
13157
13158         def create_cleanlist():
13159                 pkgs_to_remove = []
13160
13161                 if action == "depclean":
13162                         if args_set:
13163
13164                                 for pkg in vardb:
13165                                         arg_atom = None
13166                                         try:
13167                                                 arg_atom = args_set.findAtomForPackage(pkg)
13168                                         except portage.exception.InvalidDependString:
13169                                                 # this error has already been displayed by now
13170                                                 continue
13171
13172                                         if arg_atom:
13173                                                 if pkg not in graph:
13174                                                         pkgs_to_remove.append(pkg)
13175                                                 elif "--verbose" in myopts:
13176                                                         show_parents(pkg)
13177
13178                         else:
13179                                 for pkg in vardb:
13180                                         if pkg not in graph:
13181                                                 pkgs_to_remove.append(pkg)
13182                                         elif "--verbose" in myopts:
13183                                                 show_parents(pkg)
13184
13185                 elif action == "prune":
13186                         # Prune pulls in all installed packages rather than just world; the
13187                         # world arg is not a real reverse dependency, so don't display it as one.
13188                         graph.remove(set_args["world"])
13189
13190                         for atom in args_set:
13191                                 for pkg in vardb.match_pkgs(atom):
13192                                         if pkg not in graph:
13193                                                 pkgs_to_remove.append(pkg)
13194                                         elif "--verbose" in myopts:
13195                                                 show_parents(pkg)
13196
13197                 if not pkgs_to_remove:
13198                         writemsg_level(
13199                                 ">>> No packages selected for removal by %s\n" % action)
13200                         if "--verbose" not in myopts:
13201                                 writemsg_level(
13202                                         ">>> To see reverse dependencies, use %s\n" % \
13203                                                 good("--verbose"))
13204                         if action == "prune":
13205                                 writemsg_level(
13206                                         ">>> To ignore dependencies, use %s\n" % \
13207                                                 good("--nodeps"))
13208
13209                 return pkgs_to_remove
13210
13211         cleanlist = create_cleanlist()
13212
13213         if len(cleanlist):
13214                 clean_set = set(cleanlist)
13215
13216                 # Check if any of these package are the sole providers of libraries
13217                 # with consumers that have not been selected for removal. If so, these
13218                 # packages and any dependencies need to be added to the graph.
13219                 real_vardb = trees[myroot]["vartree"].dbapi
13220                 linkmap = real_vardb.linkmap
13221                 liblist = linkmap.listLibraryObjects()
13222                 consumer_cache = {}
13223                 provider_cache = {}
13224                 soname_cache = {}
13225                 consumer_map = {}
13226
13227                 writemsg_level(">>> Checking for lib consumers...\n")
13228
13229                 for pkg in cleanlist:
13230                         pkg_dblink = real_vardb._dblink(pkg.cpv)
13231                         provided_libs = set()
13232
13233                         for lib in liblist:
13234                                 if pkg_dblink.isowner(lib, myroot):
13235                                         provided_libs.add(lib)
13236
13237                         if not provided_libs:
13238                                 continue
13239
13240                         consumers = {}
13241                         for lib in provided_libs:
13242                                 lib_consumers = consumer_cache.get(lib)
13243                                 if lib_consumers is None:
13244                                         lib_consumers = linkmap.findConsumers(lib)
13245                                         consumer_cache[lib] = lib_consumers
13246                                 if lib_consumers:
13247                                         consumers[lib] = lib_consumers
13248
13249                         if not consumers:
13250                                 continue
13251
13252                         for lib, lib_consumers in consumers.items():
13253                                 for consumer_file in list(lib_consumers):
13254                                         if pkg_dblink.isowner(consumer_file, myroot):
13255                                                 lib_consumers.remove(consumer_file)
13256                                 if not lib_consumers:
13257                                         del consumers[lib]
13258
13259                         if not consumers:
13260                                 continue
13261
13262                         for lib, lib_consumers in consumers.iteritems():
13263
13264                                 soname = soname_cache.get(lib)
13265                                 if soname is None:
13266                                         soname = linkmap.getSoname(lib)
13267                                         soname_cache[lib] = soname
13268
13269                                 consumer_providers = []
13270                                 for lib_consumer in lib_consumers:
13271                                         providers = provider_cache.get(lib_consumer)
13272                                         if providers is None:
13273                                                 providers = linkmap.findProviders(lib_consumer)
13274                                                 provider_cache[lib_consumer] = providers
13275                                         if soname not in providers:
13276                                                 # Why does this happen?
13277                                                 continue
13278                                         consumer_providers.append(
13279                                                 (lib_consumer, providers[soname]))
13280
13281                                 consumers[lib] = consumer_providers
13282
13283                         consumer_map[pkg] = consumers
13284
13285                 if consumer_map:
13286
13287                         search_files = set()
13288                         for consumers in consumer_map.itervalues():
13289                                 for lib, consumer_providers in consumers.iteritems():
13290                                         for lib_consumer, providers in consumer_providers:
13291                                                 search_files.add(lib_consumer)
13292                                                 search_files.update(providers)
13293
13294                         writemsg_level(">>> Assigning files to packages...\n")
13295                         file_owners = real_vardb._owners.getFileOwnerMap(search_files)
13296
13297                         for pkg, consumers in consumer_map.items():
13298                                 for lib, consumer_providers in consumers.items():
13299                                         lib_consumers = set()
13300
13301                                         for lib_consumer, providers in consumer_providers:
13302                                                 owner_set = file_owners.get(lib_consumer)
13303                                                 provider_dblinks = set()
13304                                                 provider_pkgs = set()
13305
13306                                                 if len(providers) > 1:
13307                                                         for provider in providers:
13308                                                                 provider_set = file_owners.get(provider)
13309                                                                 if provider_set is not None:
13310                                                                         provider_dblinks.update(provider_set)
13311
13312                                                 if len(provider_dblinks) > 1:
13313                                                         for provider_dblink in provider_dblinks:
13314                                                                 pkg_key = ("installed", myroot,
13315                                                                         provider_dblink.mycpv, "nomerge")
13316                                                                 if pkg_key not in clean_set:
13317                                                                         provider_pkgs.add(vardb.get(pkg_key))
13318
13319                                                 if provider_pkgs:
13320                                                         continue
13321
13322                                                 if owner_set is not None:
13323                                                         lib_consumers.update(owner_set)
13324
13325                                         for consumer_dblink in list(lib_consumers):
13326                                                 if ("installed", myroot, consumer_dblink.mycpv,
13327                                                         "nomerge") in clean_set:
13328                                                         lib_consumers.remove(consumer_dblink)
13329                                                         continue
13330
13331                                         if lib_consumers:
13332                                                 consumers[lib] = lib_consumers
13333                                         else:
13334                                                 del consumers[lib]
13335                                 if not consumers:
13336                                         del consumer_map[pkg]
13337
13338                 if consumer_map:
13339                         # TODO: Implement a package set for rebuilding consumer packages.
13340
13341                         msg = "In order to avoid breakage of link level " + \
13342                                 "dependencies, one or more packages will not be removed. " + \
13343                                 "This can be solved by rebuilding " + \
13344                                 "the packages that pulled them in."
13345
13346                         prefix = bad(" * ")
13347                         from textwrap import wrap
13348                         writemsg_level("".join(prefix + "%s\n" % line for \
13349                                 line in wrap(msg, 70)), level=logging.WARNING, noiselevel=-1)
13350
13351                         msg = []
13352                         for pkg, consumers in consumer_map.iteritems():
13353                                 unique_consumers = set(chain(*consumers.values()))
13354                                 unique_consumers = sorted(consumer.mycpv \
13355                                         for consumer in unique_consumers)
13356                                 msg.append("")
13357                                 msg.append("  %s pulled in by:" % (pkg.cpv,))
13358                                 for consumer in unique_consumers:
13359                                         msg.append("    %s" % (consumer,))
13360                         msg.append("")
13361                         writemsg_level("".join(prefix + "%s\n" % line for line in msg),
13362                                 level=logging.WARNING, noiselevel=-1)
13363
13364                         # Add lib providers to the graph as children of lib consumers,
13365                         # and also add any dependencies pulled in by the provider.
13366                         writemsg_level(">>> Adding lib providers to graph...\n")
13367
13368                         for pkg, consumers in consumer_map.iteritems():
13369                                 for consumer_dblink in set(chain(*consumers.values())):
13370                                         consumer_pkg = vardb.get(("installed", myroot,
13371                                                 consumer_dblink.mycpv, "nomerge"))
13372                                         if not resolver._add_pkg(pkg,
13373                                                 Dependency(parent=consumer_pkg,
13374                                                 priority=UnmergeDepPriority(runtime=True),
13375                                                 root=pkg.root)):
13376                                                 resolver.display_problems()
13377                                                 return 1
13378
13379                         writemsg_level("\nCalculating dependencies  ")
13380                         success = resolver._complete_graph()
13381                         writemsg_level("\b\b... done!\n")
13382                         resolver.display_problems()
13383                         if not success:
13384                                 return 1
13385                         if unresolved_deps():
13386                                 return 1
13387
13388                         graph = resolver.digraph.copy()
13389                         required_pkgs_total = 0
13390                         for node in graph:
13391                                 if isinstance(node, Package):
13392                                         required_pkgs_total += 1
13393                         cleanlist = create_cleanlist()
13394                         if not cleanlist:
13395                                 return 0
13396                         clean_set = set(cleanlist)
13397
13398                 # Use a topological sort to create an unmerge order such that
13399                 # each package is unmerged before its dependencies. This is
13400                 # necessary to avoid breaking things that may need to run
13401                 # during pkg_prerm or pkg_postrm phases.
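                      # Illustrative ordering: if pkg A has an RDEPEND on pkg B and
                      # both are in the clean set, A is unmerged before B so that B is
                      # still installed while A's pkg_prerm and pkg_postrm phases run.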
13402
13403                 # Create a new graph to account for dependencies between the
13404                 # packages being unmerged.
13405                 graph = digraph()
13406                 del cleanlist[:]
13407
13408                 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
13409                 runtime = UnmergeDepPriority(runtime=True)
13410                 runtime_post = UnmergeDepPriority(runtime_post=True)
13411                 buildtime = UnmergeDepPriority(buildtime=True)
13412                 priority_map = {
13413                         "RDEPEND": runtime,
13414                         "PDEPEND": runtime_post,
13415                         "DEPEND": buildtime,
13416                 }
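                      # Note (illustrative): these priorities weight the graph edges so
                      # that when circular dependencies later force edges to be ignored,
                      # the ignore_priority loop below can try dropping the weakest
                      # edges first and preserve as many ordering constraints as possible.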
13417
13418                 for node in clean_set:
13419                         graph.add(node, None)
13420                         mydeps = []
13421                         node_use = node.metadata["USE"].split()
13422                         for dep_type in dep_keys:
13423                                 depstr = node.metadata[dep_type]
13424                                 if not depstr:
13425                                         continue
13426                                 try:
13427                                         portage.dep._dep_check_strict = False
13428                                         success, atoms = portage.dep_check(depstr, None, settings,
13429                                                 myuse=node_use, trees=resolver._graph_trees,
13430                                                 myroot=myroot)
13431                                 finally:
13432                                         portage.dep._dep_check_strict = True
13433                                 if not success:
13434                                         # Ignore invalid deps of packages that will
13435                                         # be uninstalled anyway.
13436                                         continue
13437
13438                                 priority = priority_map[dep_type]
13439                                 for atom in atoms:
13440                                         if not isinstance(atom, portage.dep.Atom):
13441                                                 # Ignore invalid atoms returned from dep_check().
13442                                                 continue
13443                                         if atom.blocker:
13444                                                 continue
13445                                         matches = vardb.match_pkgs(atom)
13446                                         if not matches:
13447                                                 continue
13448                                         for child_node in matches:
13449                                                 if child_node in clean_set:
13450                                                         graph.add(child_node, node, priority=priority)
13451
13452                 ordered = True
13453                 if len(graph.order) == len(graph.root_nodes()):
13454                         # If there are no dependencies between packages
13455                         # let unmerge() group them by cat/pn.
13456                         ordered = False
13457                         cleanlist = [pkg.cpv for pkg in graph.order]
13458                 else:
13459                         # Order nodes from lowest to highest overall reference count for
13460                         # optimal root node selection.
13461                         node_refcounts = {}
13462                         for node in graph.order:
13463                                 node_refcounts[node] = len(graph.parent_nodes(node))
13464                         def cmp_reference_count(node1, node2):
13465                                 return node_refcounts[node1] - node_refcounts[node2]
13466                         graph.order.sort(cmp_reference_count)
13467
13468                         ignore_priority_range = [None]
13469                         ignore_priority_range.extend(
13470                                 xrange(UnmergeDepPriority.MIN, UnmergeDepPriority.MAX + 1))
13471                         while not graph.empty():
13472                                 for ignore_priority in ignore_priority_range:
13473                                         nodes = graph.root_nodes(ignore_priority=ignore_priority)
13474                                         if nodes:
13475                                                 break
13476                                 if not nodes:
13477                                         raise AssertionError("no root nodes")
13478                                 if ignore_priority is not None:
13479                                         # Some deps have been dropped due to circular dependencies,
13480                                         # so only pop one node in order to minimize the number that
13481                                         # are dropped.
13482                                         del nodes[1:]
13483                                 for node in nodes:
13484                                         graph.remove(node)
13485                                         cleanlist.append(node.cpv)
13486
13487                 unmerge(root_config, myopts, "unmerge", cleanlist,
13488                         ldpath_mtimes, ordered=ordered)
13489
13490         if action == "prune":
13491                 return
13492
13493         if not cleanlist and "--quiet" in myopts:
13494                 return
13495
13496         print "Packages installed:   "+str(len(vardb.cpv_all()))
13497         print "Packages in world:    " + \
13498                 str(len(root_config.sets["world"].getAtoms()))
13499         print "Packages in system:   " + \
13500                 str(len(root_config.sets["system"].getAtoms()))
13501         print "Required packages:    "+str(required_pkgs_total)
13502         if "--pretend" in myopts:
13503                 print "Number to remove:     "+str(len(cleanlist))
13504         else:
13505                 print "Number removed:       "+str(len(cleanlist))
13506
13507 def resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner,
13508         skip_masked=False, skip_unsatisfied=False):
13509         """
13510         Construct a depgraph for the given resume list. This will raise
13511         PackageNotFound or depgraph.UnsatisfiedResumeDep when necessary.
13512         @rtype: tuple
13513         @returns: (success, depgraph, dropped_tasks)
13514         """
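              # Typical call, as used by action_build() below (shown here for
              # illustration only):
              #   success, mydepgraph, dropped_tasks = resume_depgraph(
              #           settings, trees, mtimedb, myopts, myparams, spinner,
              #           skip_masked=skip_masked, skip_unsatisfied=skip_unsatisfied)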
13515         mergelist = mtimedb["resume"]["mergelist"]
13516         dropped_tasks = set()
13517         while True:
13518                 mydepgraph = depgraph(settings, trees,
13519                         myopts, myparams, spinner)
13520                 try:
13521                         success = mydepgraph.loadResumeCommand(mtimedb["resume"],
13522                                 skip_masked=skip_masked)
13523                 except depgraph.UnsatisfiedResumeDep, e:
13524                         if not skip_unsatisfied:
13525                                 raise
13526
13527                         graph = mydepgraph.digraph
13528                         unsatisfied_parents = dict((dep.parent, dep.parent) \
13529                                 for dep in e.value)
13530                         traversed_nodes = set()
13531                         unsatisfied_stack = list(unsatisfied_parents)
13532                         while unsatisfied_stack:
13533                                 pkg = unsatisfied_stack.pop()
13534                                 if pkg in traversed_nodes:
13535                                         continue
13536                                 traversed_nodes.add(pkg)
13537
13538                                 # If this package was pulled in by a parent
13539                                 # package scheduled for merge, removing this
13540                                 # package may cause the the parent package's
13541                                 # package may cause the parent package's
13542                                 for parent_node in graph.parent_nodes(pkg):
13543                                         if not isinstance(parent_node, Package) \
13544                                                 or parent_node.operation not in ("merge", "nomerge"):
13545                                                 continue
13546                                         unsatisfied = \
13547                                                 graph.child_nodes(parent_node,
13548                                                 ignore_priority=DepPriority.SOFT)
13549                                         if pkg in unsatisfied:
13550                                                 unsatisfied_parents[parent_node] = parent_node
13551                                                 unsatisfied_stack.append(parent_node)
13552
13553                         pruned_mergelist = [x for x in mergelist \
13554                                 if isinstance(x, list) and \
13555                                 tuple(x) not in unsatisfied_parents]
13556
13557                         # If the mergelist doesn't shrink then this loop is infinite.
13558                         if len(pruned_mergelist) == len(mergelist):
13559                                 # This happens if a package can't be dropped because
13560                                 # it's already installed, but it has unsatisfied PDEPEND.
13561                                 raise
13562                         mergelist[:] = pruned_mergelist
13563
13564                         # Exclude installed packages that have been removed from the graph due
13565                         # to failure to build/install runtime dependencies after the dependent
13566                         # package has already been installed.
13567                         dropped_tasks.update(pkg for pkg in \
13568                                 unsatisfied_parents if pkg.operation != "nomerge")
13569                         mydepgraph.break_refs(unsatisfied_parents)
13570
13571                         del e, graph, traversed_nodes, \
13572                                 unsatisfied_parents, unsatisfied_stack
13573                         continue
13574                 else:
13575                         break
13576         return (success, mydepgraph, dropped_tasks)
13577
13578 def action_build(settings, trees, mtimedb,
13579         myopts, myaction, myfiles, spinner):
13580
13581         # validate the state of the resume data
13582         # so that we can make assumptions later.
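              # A well-formed mergelist entry is a 4-element list such as
              # ["ebuild", "/", "app-misc/foo-1.0", "merge"] (hypothetical package
              # name), i.e. (pkg_type, pkg_root, pkg_key, pkg_action) as unpacked below.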
13583         for k in ("resume", "resume_backup"):
13584                 if k not in mtimedb:
13585                         continue
13586                 resume_data = mtimedb[k]
13587                 if not isinstance(resume_data, dict):
13588                         del mtimedb[k]
13589                         continue
13590                 mergelist = resume_data.get("mergelist")
13591                 if not isinstance(mergelist, list):
13592                         del mtimedb[k]
13593                         continue
13594                 for x in mergelist:
13595                         if not (isinstance(x, list) and len(x) == 4):
13596                                 continue
13597                         pkg_type, pkg_root, pkg_key, pkg_action = x
13598                         if pkg_root not in trees:
13599                                 # Current $ROOT setting differs,
13600                                 # so the list must be stale.
13601                                 mergelist = None
13602                                 break
13603                 if not mergelist:
13604                         del mtimedb[k]
13605                         continue
13606                 resume_opts = resume_data.get("myopts")
13607                 if not isinstance(resume_opts, (dict, list)):
13608                         del mtimedb[k]
13609                         continue
13610                 favorites = resume_data.get("favorites")
13611                 if not isinstance(favorites, list):
13612                         del mtimedb[k]
13613                         continue
13614
13615         resume = False
13616         if "--resume" in myopts and \
13617                 ("resume" in mtimedb or
13618                 "resume_backup" in mtimedb):
13619                 resume = True
13620                 if "resume" not in mtimedb:
13621                         mtimedb["resume"] = mtimedb["resume_backup"]
13622                         del mtimedb["resume_backup"]
13623                         mtimedb.commit()
13624                 # "myopts" is a list for backward compatibility.
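                      # For example (illustrative), an old-style list such as
                      # ["--deep", "--update"] is converted below to
                      # {"--deep": True, "--update": True}.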
13625                 resume_opts = mtimedb["resume"].get("myopts", [])
13626                 if isinstance(resume_opts, list):
13627                         resume_opts = dict((k,True) for k in resume_opts)
13628                 for opt in ("--skipfirst", "--ask", "--tree"):
13629                         resume_opts.pop(opt, None)
13630                 myopts.update(resume_opts)
13631
13632                 if "--debug" in myopts:
13633                         writemsg_level("myopts %s\n" % (myopts,))
13634
13635                 # Adjust config according to options of the command being resumed.
13636                 for myroot in trees:
13637                         mysettings =  trees[myroot]["vartree"].settings
13638                         mysettings.unlock()
13639                         adjust_config(myopts, mysettings)
13640                         mysettings.lock()
13641                         del myroot, mysettings
13642
13643         ldpath_mtimes = mtimedb["ldpath"]
13644         favorites=[]
13645         merge_count = 0
13646         buildpkgonly = "--buildpkgonly" in myopts
13647         pretend = "--pretend" in myopts
13648         fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
13649         ask = "--ask" in myopts
13650         nodeps = "--nodeps" in myopts
13651         oneshot = "--oneshot" in myopts or "--onlydeps" in myopts
13652         tree = "--tree" in myopts
13653         if nodeps and tree:
13654                 tree = False
13655                 del myopts["--tree"]
13656                 portage.writemsg(colorize("WARN", " * ") + \
13657                         "--tree is broken with --nodeps. Disabling...\n")
13658         debug = "--debug" in myopts
13659         verbose = "--verbose" in myopts
13660         quiet = "--quiet" in myopts
13661         if pretend or fetchonly:
13662                 # make the mtimedb readonly
13663                 mtimedb.filename = None
13664         if "--digest" in myopts:
13665                 msg = "The --digest option can prevent corruption from being" + \
13666                         " noticed. The `repoman manifest` command is the preferred" + \
13667                         " way to generate manifests and it is capable of doing an" + \
13668                         " entire repository or category at once."
13669                 prefix = bad(" * ")
13670                 writemsg(prefix + "\n")
13671                 from textwrap import wrap
13672                 for line in wrap(msg, 72):
13673                         writemsg("%s%s\n" % (prefix, line))
13674                 writemsg(prefix + "\n")
13675
13676         if "--quiet" not in myopts and \
13677                 ("--pretend" in myopts or "--ask" in myopts or \
13678                 "--tree" in myopts or "--verbose" in myopts):
13679                 action = ""
13680                 if "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
13681                         action = "fetched"
13682                 elif "--buildpkgonly" in myopts:
13683                         action = "built"
13684                 else:
13685                         action = "merged"
13686                 if "--tree" in myopts and action != "fetched": # Tree doesn't work with fetching
13687                         print
13688                         print darkgreen("These are the packages that would be %s, in reverse order:") % action
13689                         print
13690                 else:
13691                         print
13692                         print darkgreen("These are the packages that would be %s, in order:") % action
13693                         print
13694
13695         show_spinner = "--quiet" not in myopts and "--nodeps" not in myopts
13696         if not show_spinner:
13697                 spinner.update = spinner.update_quiet
13698
13699         if resume:
13700                 favorites = mtimedb["resume"].get("favorites")
13701                 if not isinstance(favorites, list):
13702                         favorites = []
13703
13704                 if show_spinner:
13705                         print "Calculating dependencies  ",
13706                 myparams = create_depgraph_params(myopts, myaction)
13707
13708                 resume_data = mtimedb["resume"]
13709                 mergelist = resume_data["mergelist"]
13710                 if mergelist and "--skipfirst" in myopts:
13711                         for i, task in enumerate(mergelist):
13712                                 if isinstance(task, list) and \
13713                                         task and task[-1] == "merge":
13714                                         del mergelist[i]
13715                                         break
13716
13717                 skip_masked      = "--skipfirst" in myopts
13718                 skip_unsatisfied = "--skipfirst" in myopts
13719                 success = False
13720                 mydepgraph = None
13721                 try:
13722                         success, mydepgraph, dropped_tasks = resume_depgraph(
13723                                 settings, trees, mtimedb, myopts, myparams, spinner,
13724                                 skip_masked=skip_masked, skip_unsatisfied=skip_unsatisfied)
13725                 except (portage.exception.PackageNotFound,
13726                         depgraph.UnsatisfiedResumeDep), e:
13727                         if isinstance(e, depgraph.UnsatisfiedResumeDep):
13728                                 mydepgraph = e.depgraph
13729                         if show_spinner:
13730                                 print
13731                         from textwrap import wrap
13732                         from portage.output import EOutput
13733                         out = EOutput()
13734
13735                         resume_data = mtimedb["resume"]
13736                         mergelist = resume_data.get("mergelist")
13737                         if not isinstance(mergelist, list):
13738                                 mergelist = []
13739                         if mergelist and (debug or (verbose and not quiet)):
13740                                 out.eerror("Invalid resume list:")
13741                                 out.eerror("")
13742                                 indent = "  "
13743                                 for task in mergelist:
13744                                         if isinstance(task, list):
13745                                                 out.eerror(indent + str(tuple(task)))
13746                                 out.eerror("")
13747
13748                         if isinstance(e, depgraph.UnsatisfiedResumeDep):
13749                                 out.eerror("One or more packages are either masked or " + \
13750                                         "have missing dependencies:")
13751                                 out.eerror("")
13752                                 indent = "  "
13753                                 for dep in e.value:
13754                                         if dep.atom is None:
13755                                                 out.eerror(indent + "Masked package:")
13756                                                 out.eerror(2 * indent + str(dep.parent))
13757                                                 out.eerror("")
13758                                         else:
13759                                                 out.eerror(indent + str(dep.atom) + " pulled in by:")
13760                                                 out.eerror(2 * indent + str(dep.parent))
13761                                                 out.eerror("")
13762                                 msg = "The resume list contains packages " + \
13763                                         "that are either masked or have " + \
13764                                         "unsatisfied dependencies. " + \
13765                                         "Please restart/continue " + \
13766                                         "the operation manually, or use --skipfirst " + \
13767                                         "to skip the first package in the list and " + \
13768                                         "any other packages that may be " + \
13769                                         "masked or have missing dependencies."
13770                                 for line in wrap(msg, 72):
13771                                         out.eerror(line)
13772                         elif isinstance(e, portage.exception.PackageNotFound):
13773                                 out.eerror("An expected package is " + \
13774                                         "not available: %s" % str(e))
13775                                 out.eerror("")
13776                                 msg = "The resume list contains one or more " + \
13777                                         "packages that are no longer " + \
13778                                         "available. Please restart/continue " + \
13779                                         "the operation manually."
13780                                 for line in wrap(msg, 72):
13781                                         out.eerror(line)
13782                 else:
13783                         if show_spinner:
13784                                 print "\b\b... done!"
13785
13786                 if success:
13787                         if dropped_tasks:
13788                                 portage.writemsg("!!! One or more packages have been " + \
13789                                         "dropped due to\n" + \
13790                                         "!!! masking or unsatisfied dependencies:\n\n",
13791                                         noiselevel=-1)
13792                                 for task in dropped_tasks:
13793                                         portage.writemsg("  " + str(task) + "\n", noiselevel=-1)
13794                                 portage.writemsg("\n", noiselevel=-1)
13795                         del dropped_tasks
13796                 else:
13797                         if mydepgraph is not None:
13798                                 mydepgraph.display_problems()
13799                         if not (ask or pretend):
13800                                 # delete the current list and also the backup
13801                                 # since it's probably stale too.
13802                                 for k in ("resume", "resume_backup"):
13803                                         mtimedb.pop(k, None)
13804                                 mtimedb.commit()
13805
13806                         return 1
13807         else:
13808                 if ("--resume" in myopts):
13809                         print darkgreen("emerge: It seems we have nothing to resume...")
13810                         return os.EX_OK
13811
13812                 myparams = create_depgraph_params(myopts, myaction)
13813                 if "--quiet" not in myopts and "--nodeps" not in myopts:
13814                         print "Calculating dependencies  ",
13815                         sys.stdout.flush()
13816                 mydepgraph = depgraph(settings, trees, myopts, myparams, spinner)
13817                 try:
13818                         retval, favorites = mydepgraph.select_files(myfiles)
13819                 except portage.exception.PackageNotFound, e:
13820                         portage.writemsg("\n!!! %s\n" % str(e), noiselevel=-1)
13821                         return 1
13822                 except portage.exception.PackageSetNotFound, e:
13823                         root_config = trees[settings["ROOT"]]["root_config"]
13824                         display_missing_pkg_set(root_config, e.value)
13825                         return 1
13826                 if show_spinner:
13827                         print "\b\b... done!"
13828                 if not retval:
13829                         mydepgraph.display_problems()
13830                         return 1
13831
13832         if "--pretend" not in myopts and \
13833                 ("--ask" in myopts or "--tree" in myopts or \
13834                 "--verbose" in myopts) and \
13835                 not ("--quiet" in myopts and "--ask" not in myopts):
13836                 if "--resume" in myopts:
13837                         mymergelist = mydepgraph.altlist()
13838                         if len(mymergelist) == 0:
13839                                 print colorize("INFORM", "emerge: It seems we have nothing to resume...")
13840                                 return os.EX_OK
13841                         favorites = mtimedb["resume"]["favorites"]
13842                         retval = mydepgraph.display(
13843                                 mydepgraph.altlist(reversed=tree),
13844                                 favorites=favorites)
13845                         mydepgraph.display_problems()
13846                         if retval != os.EX_OK:
13847                                 return retval
13848                         prompt="Would you like to resume merging these packages?"
13849                 else:
13850                         retval = mydepgraph.display(
13851                                 mydepgraph.altlist(reversed=("--tree" in myopts)),
13852                                 favorites=favorites)
13853                         mydepgraph.display_problems()
13854                         if retval != os.EX_OK:
13855                                 return retval
13856                         mergecount=0
13857                         for x in mydepgraph.altlist():
13858                                 if isinstance(x, Package) and x.operation == "merge":
13859                                         mergecount += 1
13860
13861                         if mergecount==0:
13862                                 sets = trees[settings["ROOT"]]["root_config"].sets
13863                                 world_candidates = None
13864                                 if "--noreplace" in myopts and \
13865                                         not oneshot and favorites:
13866                                         # Sets that are not world candidates are filtered
13867                                         # out here since the favorites list needs to be
13868                                         # complete for depgraph.loadResumeCommand() to
13869                                         # operate correctly.
13870                                         world_candidates = [x for x in favorites \
13871                                                 if not (x.startswith(SETPREFIX) and \
13872                                                 not sets[x[1:]].world_candidate)]
13873                                 if "--noreplace" in myopts and \
13874                                         not oneshot and world_candidates:
13875                                         print
13876                                         for x in world_candidates:
13877                                                 print " %s %s" % (good("*"), x)
13878                                         prompt="Would you like to add these packages to your world favorites?"
13879                                 elif settings["AUTOCLEAN"] and "yes"==settings["AUTOCLEAN"]:
13880                                         prompt="Nothing to merge; would you like to auto-clean packages?"
13881                                 else:
13882                                         print
13883                                         print "Nothing to merge; quitting."
13884                                         print
13885                                         return os.EX_OK
13886                         elif "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
13887                                 prompt="Would you like to fetch the source files for these packages?"
13888                         else:
13889                                 prompt="Would you like to merge these packages?"
13890                 print
13891                 if "--ask" in myopts and userquery(prompt) == "No":
13892                         print
13893                         print "Quitting."
13894                         print
13895                         return os.EX_OK
13896                 # Don't ask again (e.g. when auto-cleaning packages after merge)
13897                 myopts.pop("--ask", None)
13898
13899         if ("--pretend" in myopts) and not ("--fetchonly" in myopts or "--fetch-all-uri" in myopts):
13900                 if ("--resume" in myopts):
13901                         mymergelist = mydepgraph.altlist()
13902                         if len(mymergelist) == 0:
13903                                 print colorize("INFORM", "emerge: It seems we have nothing to resume...")
13904                                 return os.EX_OK
13905                         favorites = mtimedb["resume"]["favorites"]
13906                         retval = mydepgraph.display(
13907                                 mydepgraph.altlist(reversed=tree),
13908                                 favorites=favorites)
13909                         mydepgraph.display_problems()
13910                         if retval != os.EX_OK:
13911                                 return retval
13912                 else:
13913                         retval = mydepgraph.display(
13914                                 mydepgraph.altlist(reversed=("--tree" in myopts)),
13915                                 favorites=favorites)
13916                         mydepgraph.display_problems()
13917                         if retval != os.EX_OK:
13918                                 return retval
13919                         if "--buildpkgonly" in myopts:
13920                                 graph_copy = mydepgraph.digraph.clone()
13921                                 for node in list(graph_copy.order):
13922                                         if not isinstance(node, Package):
13923                                                 graph_copy.remove(node)
13924                                 if not graph_copy.hasallzeros(ignore_priority=DepPriority.MEDIUM):
13925                                         print "\n!!! --buildpkgonly requires all dependencies to be merged."
13926                                         print "!!! You have to merge the dependencies before you can build this package.\n"
13927                                         return 1
13928         else:
13929                 if "--buildpkgonly" in myopts:
13930                         graph_copy = mydepgraph.digraph.clone()
13931                         for node in list(graph_copy.order):
13932                                 if not isinstance(node, Package):
13933                                         graph_copy.remove(node)
13934                         if not graph_copy.hasallzeros(ignore_priority=DepPriority.MEDIUM):
13935                                 print "\n!!! --buildpkgonly requires all dependencies to be merged."
13936                                 print "!!! Cannot merge requested packages. Merge deps and try again.\n"
13937                                 return 1
13938
13939                 if ("--resume" in myopts):
13940                         favorites=mtimedb["resume"]["favorites"]
13941                         mymergelist = mydepgraph.altlist()
13942                         mydepgraph.break_refs(mymergelist)
13943                         mergetask = Scheduler(settings, trees, mtimedb, myopts,
13944                                 spinner, mymergelist, favorites, mydepgraph.schedulerGraph())
13945                         del mydepgraph, mymergelist
13946                         clear_caches(trees)
13947
13948                         retval = mergetask.merge()
13949                         merge_count = mergetask.curval
13950                 else:
13951                         if "resume" in mtimedb and \
13952                         "mergelist" in mtimedb["resume"] and \
13953                         len(mtimedb["resume"]["mergelist"]) > 1:
13954                                 mtimedb["resume_backup"] = mtimedb["resume"]
13955                                 del mtimedb["resume"]
13956                                 mtimedb.commit()
13957                         mtimedb["resume"]={}
13958                         # Stored as a dict starting with portage-2.1.6_rc1, and supported
13959                         # by >=portage-2.1.3_rc8. Versions <portage-2.1.3_rc8 only support
13960                         # a list type for options.
13961                         mtimedb["resume"]["myopts"] = myopts.copy()
13962
13963                         # Convert Atom instances to plain str since the mtimedb loader
13964                         # sets unpickler.find_global = None which causes unpickler.load()
13965                         # to raise the following exception:
13966                         #
13967                         # cPickle.UnpicklingError: Global and instance pickles are not supported.
13968                         #
13969                         # TODO: Maybe stop setting find_global = None, or find some other
13970                         # way to avoid accidental triggering of the above UnpicklingError.
13971                         mtimedb["resume"]["favorites"] = [str(x) for x in favorites]
13972
13973                         if ("--digest" in myopts) and not ("--fetchonly" in myopts or "--fetch-all-uri" in myopts):
13974                                 for pkgline in mydepgraph.altlist():
13975                                         if pkgline[0]=="ebuild" and pkgline[3]=="merge":
13976                                                 y = trees[pkgline[1]]["porttree"].dbapi.findname(pkgline[2])
13977                                                 tmpsettings = portage.config(clone=settings)
13978                                                 edebug = 0
13979                                                 if settings.get("PORTAGE_DEBUG", "") == "1":
13980                                                         edebug = 1
13981                                                 retval = portage.doebuild(
13982                                                         y, "digest", settings["ROOT"], tmpsettings, edebug,
13983                                                         ("--pretend" in myopts),
13984                                                         mydbapi=trees[pkgline[1]]["porttree"].dbapi,
13985                                                         tree="porttree")
13986
13987                         pkglist = mydepgraph.altlist()
13988                         mydepgraph.saveNomergeFavorites()
13989                         mydepgraph.break_refs(pkglist)
13990                         mergetask = Scheduler(settings, trees, mtimedb, myopts,
13991                                 spinner, pkglist, favorites, mydepgraph.schedulerGraph())
13992                         del mydepgraph, pkglist
13993                         clear_caches(trees)
13994
13995                         retval = mergetask.merge()
13996                         merge_count = mergetask.curval
13997
13998                 if retval == os.EX_OK and not (buildpkgonly or fetchonly or pretend):
13999                         if "yes" == settings.get("AUTOCLEAN"):
14000                                 portage.writemsg_stdout(">>> Auto-cleaning packages...\n")
14001                                 unmerge(trees[settings["ROOT"]]["root_config"],
14002                                         myopts, "clean", [],
14003                                         ldpath_mtimes, autoclean=1)
14004                         else:
14005                                 portage.writemsg_stdout(colorize("WARN", "WARNING:")
14006                                         + " AUTOCLEAN is disabled.  This can cause serious"
14007                                         + " problems due to overlapping packages.\n")
14008                         trees[settings["ROOT"]]["vartree"].dbapi.plib_registry.pruneNonExisting()
14009
14010                 return retval
14011
14012 def multiple_actions(action1, action2):
14013         sys.stderr.write("\n!!! Multiple actions requested... Please choose one only.\n")
14014         sys.stderr.write("!!! '%s' or '%s'\n\n" % (action1, action2))
14015         sys.exit(1)
14016
14017 def insert_optional_args(args):
14018         """
14019         Parse optional arguments and insert a value if one has
14020         not been provided. This is done before feeding the args
14021         to the optparse parser since that parser does not support
14022         this feature natively.
14023         """
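              # Illustrative behavior: ["-j4", "--quiet"] becomes
              # ["--jobs", "4", "--quiet"]; a bare "-j" or "--jobs" with no count
              # becomes ["--jobs", "True"], which parse_opts() later treats as an
              # unlimited job count.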
14024
14025         new_args = []
14026         jobs_opts = ("-j", "--jobs")
14027         arg_stack = args[:]
14028         arg_stack.reverse()
14029         while arg_stack:
14030                 arg = arg_stack.pop()
14031
14032                 short_job_opt = bool("j" in arg and arg[:1] == "-" and arg[:2] != "--")
14033                 if not (short_job_opt or arg in jobs_opts):
14034                         new_args.append(arg)
14035                         continue
14036
14037                 # Insert a placeholder value for --jobs below so that
14038                 # optparse's requirement for an option argument is satisfied.
14039
14040                 new_args.append("--jobs")
14041                 job_count = None
14042                 saved_opts = None
14043                 if short_job_opt and len(arg) > 2:
14044                         if arg[:2] == "-j":
14045                                 try:
14046                                         job_count = int(arg[2:])
14047                                 except ValueError:
14048                                         saved_opts = arg[2:]
14049                         else:
14050                                 job_count = "True"
14051                                 saved_opts = arg[1:].replace("j", "")
14052
14053                 if job_count is None and arg_stack:
14054                         try:
14055                                 job_count = int(arg_stack[-1])
14056                         except ValueError:
14057                                 pass
14058                         else:
14059                                 # Discard the job count from the stack
14060                                 # since we're consuming it here.
14061                                 arg_stack.pop()
14062
14063                 if job_count is None:
14064                         # unlimited number of jobs
14065                         new_args.append("True")
14066                 else:
14067                         new_args.append(str(job_count))
14068
14069                 if saved_opts is not None:
14070                         new_args.append("-" + saved_opts)
14071
14072         return new_args
14073
14074 def parse_opts(tmpcmdline, silent=False):
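              # Summary (illustrative): returns (myaction, myopts, myfiles) -- the
              # single requested action or None, a dict of recognized options, and
              # any remaining positional arguments.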
14075         myaction=None
14076         myopts = {}
14077         myfiles=[]
14078
14079         global actions, options, shortmapping
14080
14081         longopt_aliases = {"--cols":"--columns", "--skip-first":"--skipfirst"}
14082         argument_options = {
14083                 "--config-root": {
14084                         "help":"specify the location for portage configuration files",
14085                         "action":"store"
14086                 },
14087                 "--color": {
14088                         "help":"enable or disable color output",
14089                         "type":"choice",
14090                         "choices":("y", "n")
14091                 },
14092
14093                 "--jobs": {
14094
14095                         "help"   : "Specifies the number of packages to build " + \
14096                                 "simultaneously.",
14097
14098                         "action" : "store"
14099                 },
14100
14101                 "--load-average": {
14102
14103                         "help"   :"Specifies that no new builds should be started " + \
14104                                 "if there are other builds running and the load average " + \
14105                                 "is at least LOAD (a floating-point number).",
14106
14107                         "action" : "store"
14108                 },
14109
14110                 "--with-bdeps": {
14111                         "help":"include unnecessary build time dependencies",
14112                         "type":"choice",
14113                         "choices":("y", "n")
14114                 },
14115                 "--reinstall": {
14116                         "help":"specify conditions to trigger package reinstallation",
14117                         "type":"choice",
14118                         "choices":["changed-use"]
14119                 }
14120         }
14121
14122         from optparse import OptionParser
14123         parser = OptionParser()
14124         if parser.has_option("--help"):
14125                 parser.remove_option("--help")
14126
14127         for action_opt in actions:
14128                 parser.add_option("--" + action_opt, action="store_true",
14129                         dest=action_opt.replace("-", "_"), default=False)
14130         for myopt in options:
14131                 parser.add_option(myopt, action="store_true",
14132                         dest=myopt.lstrip("--").replace("-", "_"), default=False)
14133         for shortopt, longopt in shortmapping.iteritems():
14134                 parser.add_option("-" + shortopt, action="store_true",
14135                         dest=longopt.lstrip("--").replace("-", "_"), default=False)
14136         for myalias, myopt in longopt_aliases.iteritems():
14137                 parser.add_option(myalias, action="store_true",
14138                         dest=myopt.lstrip("--").replace("-", "_"), default=False)
14139
14140         for myopt, kwargs in argument_options.iteritems():
14141                 parser.add_option(myopt,
14142                         dest=myopt.lstrip("--").replace("-", "_"), **kwargs)
14143
14144         tmpcmdline = insert_optional_args(tmpcmdline)
14145
14146         myoptions, myargs = parser.parse_args(args=tmpcmdline)
14147
14148         if myoptions.jobs:
14149                 jobs = None
14150                 if myoptions.jobs == "True":
14151                         jobs = True
14152                 else:
14153                         try:
14154                                 jobs = int(myoptions.jobs)
14155                         except ValueError:
14156                                 jobs = -1
14157
14158                 if jobs is not True and \
14159                         jobs < 1:
14160                         jobs = None
14161                         if not silent:
14162                                 writemsg("!!! Invalid --jobs parameter: '%s'\n" % \
14163                                         (myoptions.jobs,), noiselevel=-1)
14164
14165                 myoptions.jobs = jobs
14166
14167         if myoptions.load_average:
14168                 try:
14169                         load_average = float(myoptions.load_average)
14170                 except ValueError:
14171                         load_average = 0.0
14172
14173                 if load_average <= 0.0:
14174                         load_average = None
14175                         if not silent:
14176                                 writemsg("!!! Invalid --load-average parameter: '%s'\n" % \
14177                                         (myoptions.load_average,), noiselevel=-1)
14178
14179                 myoptions.load_average = load_average
14180
14181         for myopt in options:
14182                 v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"))
14183                 if v:
14184                         myopts[myopt] = True
14185
14186         for myopt in argument_options:
14187                 v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"), None)
14188                 if v is not None:
14189                         myopts[myopt] = v
14190
14191         for action_opt in actions:
14192                 v = getattr(myoptions, action_opt.replace("-", "_"))
14193                 if v:
14194                         if myaction:
14195                                 multiple_actions(myaction, action_opt)
14196                                 sys.exit(1)
14197                         myaction = action_opt
14198
14199         myfiles += myargs
14200
14201         return myaction, myopts, myfiles
14202
14203 def validate_ebuild_environment(trees):
14204         for myroot in trees:
14205                 settings = trees[myroot]["vartree"].settings
14206                 settings.validate()
14207
14208 def clear_caches(trees):
14209         for d in trees.itervalues():
14210                 d["porttree"].dbapi.melt()
14211                 d["porttree"].dbapi._aux_cache.clear()
14212                 d["bintree"].dbapi._aux_cache.clear()
14213                 d["bintree"].dbapi._clear_cache()
14214                 d["vartree"].dbapi.linkmap._clear_cache()
14215         portage.dircache.clear()
14216         gc.collect()
14217
14218 def load_emerge_config(trees=None):
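              # Typical usage (illustrative):
              #   settings, trees, mtimedb = load_emerge_config()
              # honoring PORTAGE_CONFIGROOT and ROOT from the environment when set.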
14219         kwargs = {}
14220         for k, envvar in (("config_root", "PORTAGE_CONFIGROOT"), ("target_root", "ROOT")):
14221                 v = os.environ.get(envvar, None)
14222                 if v and v.strip():
14223                         kwargs[k] = v
14224         trees = portage.create_trees(trees=trees, **kwargs)
14225
14226         for root, root_trees in trees.iteritems():
14227                 settings = root_trees["vartree"].settings
14228                 setconfig = load_default_config(settings, root_trees)
14229                 root_trees["root_config"] = RootConfig(settings, root_trees, setconfig)
14230
14231         settings = trees["/"]["vartree"].settings
14232
14233         for myroot in trees:
14234                 if myroot != "/":
14235                         settings = trees[myroot]["vartree"].settings
14236                         break
14237
14238         mtimedbfile = os.path.join("/", portage.CACHE_PATH.lstrip(os.path.sep), "mtimedb")
14239         mtimedb = portage.MtimeDB(mtimedbfile)
14240
14241         return settings, trees, mtimedb
14242
14243 def adjust_config(myopts, settings):
14244         """Make emerge specific adjustments to the config."""
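              # For example (illustrative): an unparseable CLEAN_DELAY such as "oops"
              # is reported and reset to the default of 5 below, and "--debug" forces
              # PORTAGE_DEBUG=1 regardless of the configured value.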
14245
14246         # To enhance usability, make some vars case insensitive by forcing them to
14247         # lower case.
14248         for myvar in ("AUTOCLEAN", "NOCOLOR"):
14249                 if myvar in settings:
14250                         settings[myvar] = settings[myvar].lower()
14251                         settings.backup_changes(myvar)
14252         del myvar
14253
14254         # Kill noauto as it will break merges otherwise.
14255         if "noauto" in settings.features:
14256                 while "noauto" in settings.features:
14257                         settings.features.remove("noauto")
14258                 settings["FEATURES"] = " ".join(settings.features)
14259                 settings.backup_changes("FEATURES")
14260
14261         CLEAN_DELAY = 5
14262         try:
14263                 CLEAN_DELAY = int(settings.get("CLEAN_DELAY", str(CLEAN_DELAY)))
14264         except ValueError, e:
14265                 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
14266                 portage.writemsg("!!! Unable to parse integer: CLEAN_DELAY='%s'\n" % \
14267                         settings["CLEAN_DELAY"], noiselevel=-1)
14268         settings["CLEAN_DELAY"] = str(CLEAN_DELAY)
14269         settings.backup_changes("CLEAN_DELAY")
14270
14271         EMERGE_WARNING_DELAY = 10
14272         try:
14273                 EMERGE_WARNING_DELAY = int(settings.get(
14274                         "EMERGE_WARNING_DELAY", str(EMERGE_WARNING_DELAY)))
14275         except ValueError, e:
14276                 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
14277                 portage.writemsg("!!! Unable to parse integer: EMERGE_WARNING_DELAY='%s'\n" % \
14278                         settings["EMERGE_WARNING_DELAY"], noiselevel=-1)
14279         settings["EMERGE_WARNING_DELAY"] = str(EMERGE_WARNING_DELAY)
14280         settings.backup_changes("EMERGE_WARNING_DELAY")
14281
14282         if "--quiet" in myopts:
14283                 settings["PORTAGE_QUIET"]="1"
14284                 settings.backup_changes("PORTAGE_QUIET")
14285
14286         if "--verbose" in myopts:
14287                 settings["PORTAGE_VERBOSE"] = "1"
14288                 settings.backup_changes("PORTAGE_VERBOSE")
14289
14290         # Set so that configs will be merged regardless of remembered status
14291         if ("--noconfmem" in myopts):
14292                 settings["NOCONFMEM"]="1"
14293                 settings.backup_changes("NOCONFMEM")
14294
14295         # Set various debug markers... They should be merged somehow.
14296         PORTAGE_DEBUG = 0
14297         try:
14298                 PORTAGE_DEBUG = int(settings.get("PORTAGE_DEBUG", str(PORTAGE_DEBUG)))
14299                 if PORTAGE_DEBUG not in (0, 1):
14300                         portage.writemsg("!!! Invalid value: PORTAGE_DEBUG='%i'\n" % \
14301                                 PORTAGE_DEBUG, noiselevel=-1)
14302                         portage.writemsg("!!! PORTAGE_DEBUG must be either 0 or 1\n",
14303                                 noiselevel=-1)
14304                         PORTAGE_DEBUG = 0
14305         except ValueError, e:
14306                 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
14307                 portage.writemsg("!!! Unable to parse integer: PORTAGE_DEBUG='%s'\n" %\
14308                         settings["PORTAGE_DEBUG"], noiselevel=-1)
14309                 del e
14310         if "--debug" in myopts:
14311                 PORTAGE_DEBUG = 1
14312         settings["PORTAGE_DEBUG"] = str(PORTAGE_DEBUG)
14313         settings.backup_changes("PORTAGE_DEBUG")
14314
14315         if settings.get("NOCOLOR") not in ("yes","true"):
14316                 portage.output.havecolor = 1
14317
14318         # The explicit --color < y | n > option overrides the NOCOLOR
14319         # environment variable and stdout auto-detection.
14320         if "--color" in myopts:
14321                 if "y" == myopts["--color"]:
14322                         portage.output.havecolor = 1
14323                         settings["NOCOLOR"] = "false"
14324                 else:
14325                         portage.output.havecolor = 0
14326                         settings["NOCOLOR"] = "true"
14327                 settings.backup_changes("NOCOLOR")
14328         elif not sys.stdout.isatty() and settings.get("NOCOLOR") != "no":
14329                 portage.output.havecolor = 0
14330                 settings["NOCOLOR"] = "true"
14331                 settings.backup_changes("NOCOLOR")
14332
14333 def apply_priorities(settings):
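              """Apply PORTAGE_IONICE_COMMAND and PORTAGE_NICENESS to the
              current process."""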
14334         ionice(settings)
14335         nice(settings)
14336
14337 def nice(settings):
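              """Renice the current process according to PORTAGE_NICENESS,
              reporting any OSError or ValueError via EOutput."""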
14338         try:
14339                 os.nice(int(settings.get("PORTAGE_NICENESS", "0")))
14340         except (OSError, ValueError), e:
14341                 out = portage.output.EOutput()
14342                 out.eerror("Failed to change nice value to '%s'" % \
14343                         settings["PORTAGE_NICENESS"])
14344                 out.eerror("%s\n" % str(e))
14345
14346 def ionice(settings):
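              """Run PORTAGE_IONICE_COMMAND, if set, with ${PID} expanded to the
              current process id. A missing command is ignored silently; a
              non-zero exit status only produces an error message."""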
14347
14348         ionice_cmd = settings.get("PORTAGE_IONICE_COMMAND")
14349         if ionice_cmd:
14350                 ionice_cmd = shlex.split(ionice_cmd)
14351         if not ionice_cmd:
14352                 return
14353
14354         from portage.util import varexpand
14355         variables = {"PID" : str(os.getpid())}
14356         cmd = [varexpand(x, mydict=variables) for x in ionice_cmd]
14357
14358         try:
14359                 rval = portage.process.spawn(cmd, env=os.environ)
14360         except portage.exception.CommandNotFound:
14361                 # The ionice command is not available,
14362                 # so return silently.
14363                 return
14364
14365         if rval != os.EX_OK:
14366                 out = portage.output.EOutput()
14367                 out.eerror("PORTAGE_IONICE_COMMAND returned %d" % (rval,))
14368                 out.eerror("See the make.conf(5) man page for PORTAGE_IONICE_COMMAND usage instructions.")
14369
14370 def display_missing_pkg_set(root_config, set_name):
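              """Log an error explaining that set_name does not exist, together
              with the names of the sets that are defined for this root."""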
14371
14372         msg = []
14373         msg.append(("emerge: There are no sets to satisfy '%s'. " + \
14374                 "The following sets exist:") % \
14375                 colorize("INFORM", set_name))
14376         msg.append("")
14377
14378         for s in sorted(root_config.sets):
14379                 msg.append("    %s" % s)
14380         msg.append("")
14381
14382         writemsg_level("".join("%s\n" % l for l in msg),
14383                 level=logging.ERROR, noiselevel=-1)
14384
14385 def expand_set_arguments(myfiles, myaction, root_config):
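              """Expand SETPREFIX arguments in myfiles: apply any per-set options
              given in braces, evaluate the /@, -@ and +@ set-expression
              operators, and replace sets with their atoms for actions that do
              not perform their own expansion. Returns (newargs, retval), with
              newargs == None on fatal errors."""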
14386         retval = os.EX_OK
14387         setconfig = root_config.setconfig
14388
14389         sets = setconfig.getSets()
14390
14391         # In order to know exactly which atoms/sets should be added to the
14392         # world file, the depgraph performs set expansion later. It will get
14393         # confused about where the atoms came from if it's not allowed to
14394         # expand them itself.
14395         do_not_expand = (None, )
14396         newargs = []
14397         for a in myfiles:
14398                 if a in ("system", "world"):
14399                         newargs.append(SETPREFIX+a)
14400                 else:
14401                         newargs.append(a)
14402         myfiles = newargs
14403         del newargs
14404         newargs = []
14405
14406         # separators for set arguments
14407         ARG_START = "{"
14408         ARG_END = "}"
14409
14410         # WARNING: all operators must be of equal length
14411         IS_OPERATOR = "/@"
14412         DIFF_OPERATOR = "-@"
14413         UNION_OPERATOR = "+@"
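              # Illustrative examples of the operator syntax (the "kde" set is
              # hypothetical; only "world" and "system" are guaranteed to exist):
              #   @world-@system   atoms in world but not in system (difference)
              #   @world+@kde      atoms in either set (union)
              #   @world/@kde      atoms common to both sets (intersection)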
14414
14415         for i in range(0, len(myfiles)):
14416                 if myfiles[i].startswith(SETPREFIX):
14417                         start = 0
14418                         end = 0
14419                         x = myfiles[i][len(SETPREFIX):]
14420                         newset = ""
14421                         while x:
14422                                 start = x.find(ARG_START)
14423                                 end = x.find(ARG_END)
14424                                 if start > 0 and start < end:
14425                                         namepart = x[:start]
14426                                         argpart = x[start+1:end]
14427
14428                                         # TODO: implement proper quoting
14429                                         args = argpart.split(",")
14430                                         options = {}
14431                                         for a in args:
14432                                                 if "=" in a:
14433                                                         k, v  = a.split("=", 1)
14434                                                         options[k] = v
14435                                                 else:
14436                                                         options[a] = "True"
14437                                         setconfig.update(namepart, options)
14438                                         newset += (x[:start-len(namepart)]+namepart)
14439                                         x = x[end+len(ARG_END):]
14440                                 else:
14441                                         newset += x
14442                                         x = ""
14443                         myfiles[i] = SETPREFIX+newset
14444
14445         sets = setconfig.getSets()
14446
14447         # display errors that occurred while loading the SetConfig instance
14448         for e in setconfig.errors:
14449                 print colorize("BAD", "Error during set creation: %s" % e)
14450
14451         # emerge relies on the existence of sets with names "world" and "system"
14452         required_sets = ("world", "system")
14453         missing_sets = []
14454
14455         for s in required_sets:
14456                 if s not in sets:
14457                         missing_sets.append(s)
14458         if missing_sets:
14459                 if len(missing_sets) > 2:
14460                         missing_sets_str = ", ".join('"%s"' % s for s in missing_sets[:-1])
14461                         missing_sets_str += ', and "%s"' % missing_sets[-1]
14462                 elif len(missing_sets) == 2:
14463                         missing_sets_str = '"%s" and "%s"' % tuple(missing_sets)
14464                 else:
14465                         missing_sets_str = '"%s"' % missing_sets[-1]
14466                 msg = ["emerge: incomplete set configuration, " + \
14467                         "missing set(s): %s" % missing_sets_str]
14468                 if sets:
14469                         msg.append("        sets defined: %s" % ", ".join(sets))
14470                 msg.append("        This usually means that '%s'" % \
14471                         (os.path.join(portage.const.GLOBAL_CONFIG_PATH, "sets.conf"),))
14472                 msg.append("        is missing or corrupt.")
14473                 for line in msg:
14474                         writemsg_level(line + "\n", level=logging.ERROR, noiselevel=-1)
14475                 return (None, 1)
14476         unmerge_actions = ("unmerge", "prune", "clean", "depclean")
14477
14478         for a in myfiles:
14479                 if a.startswith(SETPREFIX):
14480                         # support simple set operations (intersection, difference and union)
14481                         # on the commandline. Expressions are evaluated strictly left-to-right
14482                         if IS_OPERATOR in a or DIFF_OPERATOR in a or UNION_OPERATOR in a:
14483                                 expression = a[len(SETPREFIX):]
14484                                 expr_sets = []
14485                                 expr_ops = []
14486                                 while IS_OPERATOR in expression or DIFF_OPERATOR in expression or UNION_OPERATOR in expression:
14487                                         is_pos = expression.rfind(IS_OPERATOR)
14488                                         diff_pos = expression.rfind(DIFF_OPERATOR)
14489                                         union_pos = expression.rfind(UNION_OPERATOR)
14490                                         op_pos = max(is_pos, diff_pos, union_pos)
14491                                         s1 = expression[:op_pos]
14492                                         s2 = expression[op_pos+len(IS_OPERATOR):]
14493                                         op = expression[op_pos:op_pos+len(IS_OPERATOR)]
14494                                         if s2 not in sets:
14495                                                 display_missing_pkg_set(root_config, s2)
14496                                                 return (None, 1)
14497                                         expr_sets.insert(0, s2)
14498                                         expr_ops.insert(0, op)
14499                                         expression = s1
14500                                 if expression not in sets:
14501                                         display_missing_pkg_set(root_config, expression)
14502                                         return (None, 1)
14503                                 expr_sets.insert(0, expression)
14504                                 result = set(setconfig.getSetAtoms(expression))
14505                                 for i in range(0, len(expr_ops)):
14506                                         s2 = setconfig.getSetAtoms(expr_sets[i+1])
14507                                         if expr_ops[i] == IS_OPERATOR:
14508                                                 result.intersection_update(s2)
14509                                         elif expr_ops[i] == DIFF_OPERATOR:
14510                                                 result.difference_update(s2)
14511                                         elif expr_ops[i] == UNION_OPERATOR:
14512                                                 result.update(s2)
14513                                         else:
14514                                                 raise NotImplementedError("unknown set operator %s" % expr_ops[i])
14515                                 newargs.extend(result)
14516                         else:
14517                                 s = a[len(SETPREFIX):]
14518                                 if s not in sets:
14519                                         display_missing_pkg_set(root_config, s)
14520                                         return (None, 1)
14521                                 setconfig.active.append(s)
14522                                 try:
14523                                         set_atoms = setconfig.getSetAtoms(s)
14524                                 except portage.exception.PackageSetNotFound, e:
14525                                         writemsg_level(("emerge: the given set '%s' " + \
14526                                                 "contains a non-existent set named '%s'.\n") % \
14527                                                 (s, e), level=logging.ERROR, noiselevel=-1)
14528                                         return (None, 1)
14529                                 if myaction in unmerge_actions and \
14530                                                 not sets[s].supportsOperation("unmerge"):
14531                                         sys.stderr.write("emerge: the given set '%s' does " % s + \
14532                                                 "not support unmerge operations\n")
14533                                         retval = 1
14534                                 elif not set_atoms:
14535                                         print "emerge: '%s' is an empty set" % s
14536                                 elif myaction not in do_not_expand:
14537                                         newargs.extend(set_atoms)
14538                                 else:
14539                                         newargs.append(SETPREFIX+s)
14540                                 for e in sets[s].errors:
14541                                         print e
14542                 else:
14543                         newargs.append(a)
14544         return (newargs, retval)
14545
14546 def repo_name_check(trees):
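              """Warn about repositories (PORTDIR and overlays) that lack a
              profiles/repo_name entry and return True if any were found; an
              empty PORTDIR is tolerated silently."""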
14547         missing_repo_names = set()
14548         for root, root_trees in trees.iteritems():
14549                 if "porttree" in root_trees:
14550                         portdb = root_trees["porttree"].dbapi
14551                         missing_repo_names.update(portdb.porttrees)
14552                         repos = portdb.getRepositories()
14553                         for r in repos:
14554                                 missing_repo_names.discard(portdb.getRepositoryPath(r))
14555                         if portdb.porttree_root in missing_repo_names and \
14556                                 not os.path.exists(os.path.join(
14557                                 portdb.porttree_root, "profiles")):
14558                                 # This is normal if $PORTDIR happens to be empty,
14559                                 # so don't warn about it.
14560                                 missing_repo_names.remove(portdb.porttree_root)
14561
14562         if missing_repo_names:
14563                 msg = []
14564                 msg.append("WARNING: One or more repositories " + \
14565                         "have missing repo_name entries:")
14566                 msg.append("")
14567                 for p in missing_repo_names:
14568                         msg.append("\t%s/profiles/repo_name" % (p,))
14569                 msg.append("")
14570                 msg.extend(textwrap.wrap("NOTE: Each repo_name entry " + \
14571                         "should be a plain text file containing a unique " + \
14572                         "name for the repository on the first line.", 70))
14573                 writemsg_level("".join("%s\n" % l for l in msg),
14574                         level=logging.WARNING, noiselevel=-1)
14575
14576         return bool(missing_repo_names)
14577
14578 def config_protect_check(trees):
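              """Warn if CONFIG_PROTECT is empty for any configured root."""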
14579         for root, root_trees in trees.iteritems():
14580                 if not root_trees["root_config"].settings.get("CONFIG_PROTECT"):
14581                         msg = "!!! CONFIG_PROTECT is empty"
14582                         if root != "/":
14583                                 msg += " for '%s'" % root
14584                         writemsg_level(msg + "\n", level=logging.WARN, noiselevel=-1)
14585
14586 def ambiguous_package_name(arg, atoms, root_config, spinner, myopts):
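              """Report that a short ebuild name matched several packages. With
              --quiet just list the fully-qualified names; otherwise reuse the
              search class to display a description of each candidate."""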
14587
14588         if "--quiet" in myopts:
14589                 print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
14590                 print "!!! one of the following fully-qualified ebuild names instead:\n"
14591                 for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
14592                         print "    " + colorize("INFORM", cp)
14593                 return
14594
14595         s = search(root_config, spinner, "--searchdesc" in myopts,
14596                 "--quiet" not in myopts, "--usepkg" in myopts,
14597                 "--usepkgonly" in myopts)
14598         null_cp = portage.dep_getkey(insert_category_into_atom(
14599                 arg, "null"))
14600         cat, atom_pn = portage.catsplit(null_cp)
14601         s.searchkey = atom_pn
14602         for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
14603                 s.addCP(cp)
14604         s.output()
14605         print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
14606         print "!!! one of the above fully-qualified ebuild names instead.\n"
14607
14608 def profile_check(trees, myaction, myopts):
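              """Return os.EX_OK if the requested action does not need a valid
              profile or if every configured root has one; otherwise print an
              error explaining which actions remain available and return 1."""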
14609         if myaction in ("info", "sync"):
14610                 return os.EX_OK
14611         elif "--version" in myopts or "--help" in myopts:
14612                 return os.EX_OK
14613         for root, root_trees in trees.iteritems():
14614                 if root_trees["root_config"].settings.profiles:
14615                         continue
14616                 # generate some profile-related warning messages
14617                 validate_ebuild_environment(trees)
14618                 msg = "If you have just changed your profile configuration, you " + \
14619                         "should revert back to the previous configuration. Due to " + \
14620                         "your current profile being invalid, allowed actions are " + \
14621                         "limited to --help, --info, --sync, and --version."
14622                 writemsg_level("".join("!!! %s\n" % l for l in textwrap.wrap(msg, 70)),
14623                         level=logging.ERROR, noiselevel=-1)
14624                 return 1
14625         return os.EX_OK
14626
14627 def emerge_main():
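              """Entry point for the emerge command: parse the command line
              (twice, so that EMERGE_DEFAULT_OPTS and --config-root are honored),
              load the configuration, normalize the option set and dispatch to
              the handler for the requested action."""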
14628         global portage  # NFC why this is necessary now - genone
14629         portage._disable_legacy_globals()
14630         # Disable color until we're sure that it should be enabled (after
14631         # EMERGE_DEFAULT_OPTS has been parsed).
14632         portage.output.havecolor = 0
14633         # This first pass is just for options that need to be known as early as
14634         # possible, such as --config-root.  They will be parsed again later,
14635         # together with EMERGE_DEFAULT_OPTS (which may vary depending on the
14636         # value of --config-root).
14637         myaction, myopts, myfiles = parse_opts(sys.argv[1:], silent=True)
14638         if "--debug" in myopts:
14639                 os.environ["PORTAGE_DEBUG"] = "1"
14640         if "--config-root" in myopts:
14641                 os.environ["PORTAGE_CONFIGROOT"] = myopts["--config-root"]
14642
14643         # Portage needs to ensure a sane umask for the files it creates.
14644         os.umask(022)
14645         settings, trees, mtimedb = load_emerge_config()
14646         portdb = trees[settings["ROOT"]]["porttree"].dbapi
14647         rval = profile_check(trees, myaction, myopts)
14648         if rval != os.EX_OK:
14649                 return rval
14650
14651         if portage._global_updates(trees, mtimedb["updates"]):
14652                 mtimedb.commit()
14653                 # Reload the whole config from scratch.
14654                 settings, trees, mtimedb = load_emerge_config(trees=trees)
14655                 portdb = trees[settings["ROOT"]]["porttree"].dbapi
14656
14657         xterm_titles = "notitles" not in settings.features
14658
14659         tmpcmdline = []
14660         if "--ignore-default-opts" not in myopts:
14661                 tmpcmdline.extend(settings["EMERGE_DEFAULT_OPTS"].split())
14662         tmpcmdline.extend(sys.argv[1:])
14663         myaction, myopts, myfiles = parse_opts(tmpcmdline)
14664
14665         if "--digest" in myopts:
14666                 os.environ["FEATURES"] = os.environ.get("FEATURES","") + " digest"
14667                 # Reload the whole config from scratch so that the portdbapi internal
14668                 # config is updated with new FEATURES.
14669                 settings, trees, mtimedb = load_emerge_config(trees=trees)
14670                 portdb = trees[settings["ROOT"]]["porttree"].dbapi
14671
14672         for myroot in trees:
14673                 mysettings = trees[myroot]["vartree"].settings
14674                 mysettings.unlock()
14675                 adjust_config(myopts, mysettings)
14676                 mysettings["PORTAGE_COUNTER_HASH"] = \
14677                         trees[myroot]["vartree"].dbapi._counter_hash()
14678                 mysettings.backup_changes("PORTAGE_COUNTER_HASH")
14679                 mysettings.lock()
14680                 del myroot, mysettings
14681
14682         apply_priorities(settings)
14683
14684         spinner = stdout_spinner()
14685         if "candy" in settings.features:
14686                 spinner.update = spinner.update_scroll
14687
14688         if "--quiet" not in myopts:
14689                 portage.deprecated_profile_check(settings=settings)
14690                 repo_name_check(trees)
14691                 config_protect_check(trees)
14692
14693         eclasses_overridden = {}
14694         for mytrees in trees.itervalues():
14695                 mydb = mytrees["porttree"].dbapi
14696                 # Freeze the portdbapi for performance (memoize all xmatch results).
14697                 mydb.freeze()
14698                 eclasses_overridden.update(mydb.eclassdb._master_eclasses_overridden)
14699         del mytrees, mydb
14700
14701         if eclasses_overridden and \
14702                 settings.get("PORTAGE_ECLASS_WARNING_ENABLE") != "0":
14703                 prefix = bad(" * ")
14704                 if len(eclasses_overridden) == 1:
14705                         writemsg(prefix + "Overlay eclass overrides " + \
14706                                 "eclass from PORTDIR:\n", noiselevel=-1)
14707                 else:
14708                         writemsg(prefix + "Overlay eclasses override " + \
14709                                 "eclasses from PORTDIR:\n", noiselevel=-1)
14710                 writemsg(prefix + "\n", noiselevel=-1)
14711                 for eclass_name in sorted(eclasses_overridden):
14712                         writemsg(prefix + "  '%s/%s.eclass'\n" % \
14713                                 (eclasses_overridden[eclass_name], eclass_name),
14714                                 noiselevel=-1)
14715                 writemsg(prefix + "\n", noiselevel=-1)
14716                 msg = "It is best to avoid overriding eclasses from PORTDIR " + \
14717                 "because it will trigger invalidation of cached ebuild metadata " + \
14718                 "that is distributed with the portage tree. If you must " + \
14719                 "override eclasses from PORTDIR then you are advised to add " + \
14720                 "FEATURES=\"metadata-transfer\" to /etc/make.conf and to run " + \
14721                 "`emerge --regen` after each time that you run `emerge --sync`. " + \
14722                 "Set PORTAGE_ECLASS_WARNING_ENABLE=\"0\" in /etc/make.conf if " + \
14723                 "you would like to disable this warning."
14724                 from textwrap import wrap
14725                 for line in wrap(msg, 72):
14726                         writemsg("%s%s\n" % (prefix, line), noiselevel=-1)
14727
14728         if "moo" in myfiles:
14729                 print """
14730
14731   Larry loves Gentoo (""" + platform.system() + """)
14732
14733  _______________________
14734 < Have you mooed today? >
14735  -----------------------
14736         \   ^__^
14737          \  (oo)\_______
14738             (__)\       )\/\ 
14739                 ||----w |
14740                 ||     ||
14741
14742 """
14743
14744         for x in myfiles:
14745                 ext = os.path.splitext(x)[1]
14746                 if (ext == ".ebuild" or ext == ".tbz2") and os.path.exists(os.path.abspath(x)):
14747                         print colorize("BAD", "\n*** emerging by path is broken and may not always work!!!\n")
14748                         break
14749
14750         root_config = trees[settings["ROOT"]]["root_config"]
14751         if myaction == "list-sets":
14752                 sys.stdout.write("".join("%s\n" % s for s in sorted(root_config.sets)))
14753                 sys.stdout.flush()
14754                 return os.EX_OK
14755
14756         # only expand sets for actions taking package arguments
14757         oldargs = myfiles[:]
14758         if myaction in ("clean", "config", "depclean", "info", "prune", "unmerge", None):
14759                 myfiles, retval = expand_set_arguments(myfiles, myaction, root_config)
14760                 if retval != os.EX_OK:
14761                         return retval
14762
14763                 # Need to handle empty sets specially, otherwise emerge will respond
14764                 # with the help message for empty argument lists
14765                 if oldargs and not myfiles:
14766                         print "emerge: no targets left after set expansion"
14767                         return 0
14768
14769         if ("--tree" in myopts) and ("--columns" in myopts):
14770                 print "emerge: can't specify both of \"--tree\" and \"--columns\"."
14771                 return 1
14772
14773         if ("--quiet" in myopts):
14774                 spinner.update = spinner.update_quiet
14775                 portage.util.noiselimit = -1
14776
14777         # Always create packages if FEATURES=buildpkg
14778         # Imply --buildpkg if --buildpkgonly
14779         if ("buildpkg" in settings.features) or ("--buildpkgonly" in myopts):
14780                 if "--buildpkg" not in myopts:
14781                         myopts["--buildpkg"] = True
14782
14783         # Also allow -S to invoke search action (-sS)
14784         if ("--searchdesc" in myopts):
14785                 if myaction and myaction != "search":
14786                         myfiles.append(myaction)
14787                 if "--search" not in myopts:
14788                         myopts["--search"] = True
14789                 myaction = "search"
14790
14791         # Always try to fetch binary packages if FEATURES=getbinpkg
14792         if ("getbinpkg" in settings.features):
14793                 myopts["--getbinpkg"] = True
14794
14795         if "--buildpkgonly" in myopts:
14796                 # --buildpkgonly will not merge anything, so
14797                 # it cancels all binary package options.
14798                 for opt in ("--getbinpkg", "--getbinpkgonly",
14799                         "--usepkg", "--usepkgonly"):
14800                         myopts.pop(opt, None)
14801
14802         if "--fetch-all-uri" in myopts:
14803                 myopts["--fetchonly"] = True
14804
14805         if "--skipfirst" in myopts and "--resume" not in myopts:
14806                 myopts["--resume"] = True
14807
14808         if ("--getbinpkgonly" in myopts) and not ("--usepkgonly" in myopts):
14809                 myopts["--usepkgonly"] = True
14810
14811         if ("--getbinpkgonly" in myopts) and not ("--getbinpkg" in myopts):
14812                 myopts["--getbinpkg"] = True
14813
14814         if ("--getbinpkg" in myopts) and not ("--usepkg" in myopts):
14815                 myopts["--usepkg"] = True
14816
14817         # Also allow -K to apply --usepkg/-k
14818         if ("--usepkgonly" in myopts) and not ("--usepkg" in myopts):
14819                 myopts["--usepkg"] = True
14820
14821         # Allow -p to remove --ask
14822         if ("--pretend" in myopts) and ("--ask" in myopts):
14823                 print ">>> --pretend disables --ask... removing --ask from options."
14824                 del myopts["--ask"]
14825
14826         # forbid --ask when not in a terminal
14827         # note: this breaks `emerge --ask | tee logfile`, but that doesn't work anyway.
14828         if ("--ask" in myopts) and (not sys.stdin.isatty()):
14829                 portage.writemsg("!!! \"--ask\" should only be used in a terminal. Exiting.\n",
14830                         noiselevel=-1)
14831                 return 1
14832
14833         if settings.get("PORTAGE_DEBUG", "") == "1":
14834                 spinner.update = spinner.update_quiet
14835                 portage.debug=1
14836                 if "python-trace" in settings.features:
14837                         import portage.debug
14838                         portage.debug.set_trace(True)
14839
14840         if not ("--quiet" in myopts):
14841                 if not sys.stdout.isatty() or ("--nospinner" in myopts):
14842                         spinner.update = spinner.update_basic
14843
14844         if "--version" in myopts:
14845                 print getportageversion(settings["PORTDIR"], settings["ROOT"],
14846                         settings.profile_path, settings["CHOST"],
14847                         trees[settings["ROOT"]]["vartree"].dbapi)
14848                 return 0
14849         elif "--help" in myopts:
14850                 _emerge.help.help(myaction, myopts, portage.output.havecolor)
14851                 return 0
14852
14853         if "--debug" in myopts:
14854                 print "myaction", myaction
14855                 print "myopts", myopts
14856
14857         if not myaction and not myfiles and "--resume" not in myopts:
14858                 _emerge.help.help(myaction, myopts, portage.output.havecolor)
14859                 return 1
14860
14861         pretend = "--pretend" in myopts
14862         fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
14863         buildpkgonly = "--buildpkgonly" in myopts
14864
14865         # check that the user has sufficient privileges for the actions that require them
14866         if portage.secpass < 2:
14867                 # We've already allowed "--version" and "--help" above.
14868                 if "--pretend" not in myopts and myaction not in ("search","info"):
14869                         need_superuser = not \
14870                                 (fetchonly or \
14871                                 (buildpkgonly and secpass >= 1) or \
14872                                 myaction in ("metadata", "regen") or \
14873                                 (myaction == "sync" and os.access(settings["PORTDIR"], os.W_OK)))
14874                         if portage.secpass < 1 or \
14875                                 need_superuser:
14876                                 if need_superuser:
14877                                         access_desc = "superuser"
14878                                 else:
14879                                         access_desc = "portage group"
14880                                 # Always show portage_group_warning() when only portage group
14881                                 # access is required but the user is not in the portage group.
14882                                 from portage.data import portage_group_warning
14883                                 if "--ask" in myopts:
14884                                         myopts["--pretend"] = True
14885                                         del myopts["--ask"]
14886                                         print ("%s access is required... " + \
14887                                                 "adding --pretend to options.\n") % access_desc
14888                                         if portage.secpass < 1 and not need_superuser:
14889                                                 portage_group_warning()
14890                                 else:
14891                                         sys.stderr.write(("emerge: %s access is " + \
14892                                                 "required.\n\n") % access_desc)
14893                                         if portage.secpass < 1 and not need_superuser:
14894                                                 portage_group_warning()
14895                                         return 1
14896
14897         disable_emergelog = False
14898         for x in ("--pretend", "--fetchonly", "--fetch-all-uri"):
14899                 if x in myopts:
14900                         disable_emergelog = True
14901                         break
14902         if myaction in ("search", "info"):
14903                 disable_emergelog = True
14904         if disable_emergelog:
14905                 """ Disable emergelog for everything except build or unmerge
14906                 operations.  This helps minimize parallel emerge.log entries that can
14907                 confuse log parsers.  We especially want it disabled during
14908                 parallel-fetch, which uses --resume --fetchonly."""
14909                 global emergelog
14910                 def emergelog(*pargs, **kargs):
14911                         pass
14912
14913         if not "--pretend" in myopts:
14914                 emergelog(xterm_titles, "Started emerge on: "+\
14915                         time.strftime("%b %d, %Y %H:%M:%S", time.localtime()))
14916                 myelogstr=""
14917                 if myopts:
14918                         myelogstr=" ".join(myopts)
14919                 if myaction:
14920                         myelogstr+=" "+myaction
14921                 if myfiles:
14922                         myelogstr += " " + " ".join(oldargs)
14923                 emergelog(xterm_titles, " *** emerge " + myelogstr)
14924         del oldargs
14925
14926         def emergeexitsig(signum, frame):
14927                 signal.signal(signal.SIGINT, signal.SIG_IGN)
14928                 signal.signal(signal.SIGTERM, signal.SIG_IGN)
14929                 portage.util.writemsg("\n\nExiting on signal %(signal)s\n" % {"signal":signum})
14930                 sys.exit(100+signum)
14931         signal.signal(signal.SIGINT, emergeexitsig)
14932         signal.signal(signal.SIGTERM, emergeexitsig)
14933
14934         def emergeexit():
14935                 """This gets out final log message in before we quit."""
14936                 if "--pretend" not in myopts:
14937                         emergelog(xterm_titles, " *** terminating.")
14938                 if "notitles" not in settings.features:
14939                         xtermTitleReset()
14940         portage.atexit_register(emergeexit)
14941
14942         if myaction in ("config", "metadata", "regen", "sync"):
14943                 if "--pretend" in myopts:
14944                         sys.stderr.write(("emerge: The '%s' action does " + \
14945                                 "not support '--pretend'.\n") % myaction)
14946                         return 1
14947
14948         if "sync" == myaction:
14949                 return action_sync(settings, trees, mtimedb, myopts, myaction)
14950         elif "metadata" == myaction:
14951                 action_metadata(settings, portdb, myopts)
14952         elif myaction=="regen":
14953                 validate_ebuild_environment(trees)
14954                 action_regen(settings, portdb, myopts.get("--jobs"),
14955                         myopts.get("--load-average"))
14956         # CONFIG action
14957         elif "config"==myaction:
14958                 validate_ebuild_environment(trees)
14959                 action_config(settings, trees, myopts, myfiles)
14960
14961         # SEARCH action
14962         elif "search"==myaction:
14963                 validate_ebuild_environment(trees)
14964                 action_search(trees[settings["ROOT"]]["root_config"],
14965                         myopts, myfiles, spinner)
14966         elif myaction in ("clean", "unmerge") or \
14967                 (myaction == "prune" and "--nodeps" in myopts):
14968                 validate_ebuild_environment(trees)
14969
14970                 # Ensure atoms are valid before calling unmerge().
14971                 # For backward compat, leading '=' is not required.
14972                 for x in myfiles:
14973                         if is_valid_package_atom(x) or \
14974                                 is_valid_package_atom("=" + x):
14975                                 continue
14976                         msg = []
14977                         msg.append("'%s' is not a valid package atom." % (x,))
14978                         msg.append("Please check ebuild(5) for full details.")
14979                         writemsg_level("".join("!!! %s\n" % line for line in msg),
14980                                 level=logging.ERROR, noiselevel=-1)
14981                         return 1
14982
14983                 # When given a list of atoms, unmerge
14984                 # them in the order given.
14985                 ordered = myaction == "unmerge"
14986                 if 1 == unmerge(root_config, myopts, myaction, myfiles,
14987                         mtimedb["ldpath"], ordered=ordered):
14988                         if not (buildpkgonly or fetchonly or pretend):
14989                                 post_emerge(root_config, myopts, mtimedb, os.EX_OK)
14990
14991         elif myaction in ("depclean", "info", "prune"):
14992
14993                 # Ensure atoms are valid before calling unmerge().
14994                 vardb = trees[settings["ROOT"]]["vartree"].dbapi
14995                 valid_atoms = []
14996                 for x in myfiles:
14997                         if is_valid_package_atom(x):
14998                                 try:
14999                                         valid_atoms.append(
15000                                                 portage.dep_expand(x, mydb=vardb, settings=settings))
15001                                 except portage.exception.AmbiguousPackageName, e:
15002                                         msg = "The short ebuild name \"" + x + \
15003                                                 "\" is ambiguous.  Please specify " + \
15004                                                 "one of the following " + \
15005                                                 "fully-qualified ebuild names instead:"
15006                                         for line in textwrap.wrap(msg, 70):
15007                                                 writemsg_level("!!! %s\n" % (line,),
15008                                                         level=logging.ERROR, noiselevel=-1)
15009                                         for i in e[0]:
15010                                                 writemsg_level("    %s\n" % colorize("INFORM", i),
15011                                                         level=logging.ERROR, noiselevel=-1)
15012                                         writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
15013                                         return 1
15014                                 continue
15015                         msg = []
15016                         msg.append("'%s' is not a valid package atom." % (x,))
15017                         msg.append("Please check ebuild(5) for full details.")
15018                         writemsg_level("".join("!!! %s\n" % line for line in msg),
15019                                 level=logging.ERROR, noiselevel=-1)
15020                         return 1
15021
15022                 if myaction == "info":
15023                         return action_info(settings, trees, myopts, valid_atoms)
15024
15025                 validate_ebuild_environment(trees)
15026                 action_depclean(settings, trees, mtimedb["ldpath"],
15027                         myopts, myaction, valid_atoms, spinner)
15028                 if not (buildpkgonly or fetchonly or pretend):
15029                         post_emerge(root_config, myopts, mtimedb, os.EX_OK)
15030         # "update", "system", or just process files:
15031         else:
15032                 validate_ebuild_environment(trees)
15033                 if "--pretend" not in myopts:
15034                         display_news_notification(root_config, myopts)
15035                 retval = action_build(settings, trees, mtimedb,
15036                         myopts, myaction, myfiles, spinner)
15037                 root_config = trees[settings["ROOT"]]["root_config"]
15038                 post_emerge(root_config, myopts, mtimedb, retval)
15039
15040                 return retval