1 #!/usr/bin/python -O
2 # Copyright 1999-2006 Gentoo Foundation
3 # Distributed under the terms of the GNU General Public License v2
4 # $Id$
5
6 import array
7 from collections import deque
8 import fcntl
9 import formatter
10 import logging
11 import pwd
12 import select
13 import shlex
14 import shutil
15 import signal
16 import sys
17 import textwrap
18 import urlparse
19 import weakref
20 import gc
21 import os, stat
22 import platform
23
24 try:
25         import portage
26 except ImportError:
27         from os import path as osp
28         sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
29         import portage
30
31 from portage import digraph
32 from portage.const import NEWS_LIB_PATH
33
34 import _emerge.help
35 import portage.xpak, commands, errno, re, socket, time
36 from portage.output import blue, bold, colorize, darkblue, darkgreen, darkred, green, \
37         nc_len, red, teal, turquoise, xtermTitle, \
38         xtermTitleReset, yellow
39 from portage.output import create_color_func
40 good = create_color_func("GOOD")
41 bad = create_color_func("BAD")
42 # white looks bad on terminals with white background
43 from portage.output import bold as white
44
45 import portage.elog
46 import portage.dep
47 portage.dep._dep_check_strict = True
48 import portage.util
49 import portage.locks
50 import portage.exception
51 from portage.data import secpass
52 from portage.elog.messages import eerror
53 from portage.util import normalize_path as normpath
54 from portage.util import cmp_sort_key, writemsg, writemsg_level
55 from portage.sets import load_default_config, SETPREFIX
56 from portage.sets.base import InternalPackageSet
57
58 from itertools import chain, izip
59
60 try:
61         import cPickle as pickle
62 except ImportError:
63         import pickle
64
65 try:
66         from cStringIO import StringIO
67 except ImportError:
68         from StringIO import StringIO
69
70 class stdout_spinner(object):
71         scroll_msgs = [
72                 "Gentoo Rocks ("+platform.system()+")",
73                 "Thank you for using Gentoo. :)",
74                 "Are you actually trying to read this?",
75                 "How many times have you stared at this?",
76                 "We are generating the cache right now",
77                 "You are paying too much attention.",
78                 "A theory is better than its explanation.",
79                 "Phasers locked on target, Captain.",
80                 "Thrashing is just virtual crashing.",
81                 "To be is to program.",
82                 "Real Users hate Real Programmers.",
83                 "When all else fails, read the instructions.",
84                 "Functionality breeds Contempt.",
85                 "The future lies ahead.",
86                 "3.1415926535897932384626433832795028841971694",
87                 "Sometimes insanity is the only alternative.",
88                 "Inaccuracy saves a world of explanation.",
89         ]
90
91         twirl_sequence = "/-\\|/-\\|/-\\|/-\\|\\-/|\\-/|\\-/|\\-/|"
92
93         def __init__(self):
94                 self.spinpos = 0
95                 self.update = self.update_twirl
96                 self.scroll_sequence = self.scroll_msgs[
97                         int(time.time() * 100) % len(self.scroll_msgs)]
98                 self.last_update = 0
99                 self.min_display_latency = 0.05
100
101         def _return_early(self):
102                 """
103                 Flushing output to the tty too frequently wastes CPU time. Therefore,
104                 each update* method should return without doing any output when this
105                 method returns True.
106                 """
107                 cur_time = time.time()
108                 if cur_time - self.last_update < self.min_display_latency:
109                         return True
110                 self.last_update = cur_time
111                 return False
112
113         def update_basic(self):
114                 self.spinpos = (self.spinpos + 1) % 500
115                 if self._return_early():
116                         return
117                 if (self.spinpos % 100) == 0:
118                         if self.spinpos == 0:
119                                 sys.stdout.write(". ")
120                         else:
121                                 sys.stdout.write(".")
122                 sys.stdout.flush()
123
124         def update_scroll(self):
125                 if self._return_early():
126                         return
127                 if(self.spinpos >= len(self.scroll_sequence)):
128                         sys.stdout.write(darkgreen(" \b\b\b" + self.scroll_sequence[
129                                 len(self.scroll_sequence) - 1 - (self.spinpos % len(self.scroll_sequence))]))
130                 else:
131                         sys.stdout.write(green("\b " + self.scroll_sequence[self.spinpos]))
132                 sys.stdout.flush()
133                 self.spinpos = (self.spinpos + 1) % (2 * len(self.scroll_sequence))
134
135         def update_twirl(self):
136                 self.spinpos = (self.spinpos + 1) % len(self.twirl_sequence)
137                 if self._return_early():
138                         return
139                 sys.stdout.write("\b\b " + self.twirl_sequence[self.spinpos])
140                 sys.stdout.flush()
141
142         def update_quiet(self):
143                 return
144
145 def userquery(prompt, responses=None, colours=None):
146         """Displays a prompt and a set of responses, then waits for the user's
147         input, which is checked against the responses; the first matching
148         response is returned.  An empty response matches the first value in responses.  The
149         input buffer is *not* cleared prior to the prompt!
150
151         prompt: a String.
152         responses: a List of Strings.
153         colours: a List of Functions taking and returning a String, used to
154         process the responses for display. Typically these will be functions
155         like red() but could be e.g. lambda x: "DisplayString".
156         If responses is omitted, defaults to ["Yes", "No"], [green, red].
157         If only colours is omitted, defaults to [bold, ...].
158
159         Returns a member of the List responses. (If called without optional
160         arguments, returns "Yes" or "No".)
161         KeyboardInterrupt is converted to SystemExit to avoid tracebacks being
162         printed."""
163         if responses is None:
164                 responses = ["Yes", "No"]
165                 colours = [
166                         create_color_func("PROMPT_CHOICE_DEFAULT"),
167                         create_color_func("PROMPT_CHOICE_OTHER")
168                 ]
169         elif colours is None:
170                 colours=[bold]
171         colours=(colours*len(responses))[:len(responses)]
172         print bold(prompt),
173         try:
174                 while True:
175                         response=raw_input("["+"/".join([colours[i](responses[i]) for i in range(len(responses))])+"] ")
176                         for key in responses:
177                                 # An empty response will match the first value in responses.
178                                 if response.upper()==key[:len(response)].upper():
179                                         return key
180                         print "Sorry, response '%s' not understood." % response,
181         except (EOFError, KeyboardInterrupt):
182                 print "Interrupted."
183                 sys.exit(1)
184
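# Example use of userquery() above (a sketch; the return value depends on what
# the user types, and the default responses are "Yes"/"No"):
#
#     if userquery("Would you like to merge these packages?") == "No":
#             sys.exit(1)
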
185 actions = frozenset([
186 "clean", "config", "depclean",
187 "info", "list-sets", "metadata",
188 "prune", "regen",  "search",
189 "sync",  "unmerge",
190 ])
191 options=[
192 "--ask",          "--alphabetical",
193 "--buildpkg",     "--buildpkgonly",
194 "--changelog",    "--columns",
195 "--complete-graph",
196 "--debug",        "--deep",
197 "--digest",
198 "--emptytree",
199 "--fetchonly",    "--fetch-all-uri",
200 "--getbinpkg",    "--getbinpkgonly",
201 "--help",         "--ignore-default-opts",
202 "--keep-going",
203 "--noconfmem",
204 "--newuse",       "--nocolor",
205 "--nodeps",       "--noreplace",
206 "--nospinner",    "--oneshot",
207 "--onlydeps",     "--pretend",
208 "--quiet",        "--resume",
209 "--searchdesc",   "--selective",
210 "--skipfirst",
211 "--tree",
212 "--update",
213 "--usepkg",       "--usepkgonly",
214 "--verbose",      "--version"
215 ]
216
217 shortmapping={
218 "1":"--oneshot",
219 "a":"--ask",
220 "b":"--buildpkg",  "B":"--buildpkgonly",
221 "c":"--clean",     "C":"--unmerge",
222 "d":"--debug",     "D":"--deep",
223 "e":"--emptytree",
224 "f":"--fetchonly", "F":"--fetch-all-uri",
225 "g":"--getbinpkg", "G":"--getbinpkgonly",
226 "h":"--help",
227 "k":"--usepkg",    "K":"--usepkgonly",
228 "l":"--changelog",
229 "n":"--noreplace", "N":"--newuse",
230 "o":"--onlydeps",  "O":"--nodeps",
231 "p":"--pretend",   "P":"--prune",
232 "q":"--quiet",
233 "s":"--search",    "S":"--searchdesc",
234 "t":"--tree",
235 "u":"--update",
236 "v":"--verbose",   "V":"--version"
237 }
238
239 def emergelog(xterm_titles, mystr, short_msg=None):
240         if xterm_titles and short_msg:
241                 if "HOSTNAME" in os.environ:
242                         short_msg = os.environ["HOSTNAME"]+": "+short_msg
243                 xtermTitle(short_msg)
244         try:
245                 file_path = "/var/log/emerge.log"
246                 mylogfile = open(file_path, "a")
247                 portage.util.apply_secpass_permissions(file_path,
248                         uid=portage.portage_uid, gid=portage.portage_gid,
249                         mode=0660)
250                 mylock = None
251                 try:
252                         mylock = portage.locks.lockfile(mylogfile)
253                         # seek because we may have gotten held up by the lock.
254                         # if so, we may not be positioned at the end of the file.
255                         mylogfile.seek(0, 2)
256                         mylogfile.write(str(time.time())[:10]+": "+mystr+"\n")
257                         mylogfile.flush()
258                 finally:
259                         if mylock:
260                                 portage.locks.unlockfile(mylock)
261                         mylogfile.close()
262         except (IOError,OSError,portage.exception.PortageException), e:
263                 if secpass >= 1:
264                         print >> sys.stderr, "emergelog():",e
265
266 def countdown(secs=5, doing="Starting"):
267         if secs:
268                 print ">>> Waiting",secs,"seconds before starting..."
269                 print ">>> (Control-C to abort)...\n"+doing+" in: ",
270                 ticks=range(secs)
271                 ticks.reverse()
272                 for sec in ticks:
273                         sys.stdout.write(colorize("UNMERGE_WARN", str(sec+1)+" "))
274                         sys.stdout.flush()
275                         time.sleep(1)
276                 print
277
278 # formats a size given in bytes nicely
279 def format_size(mysize):
280         if isinstance(mysize, basestring):
281                 return mysize
282         if 0 != mysize % 1024:
283                 # Always round up to the next kB so that it doesn't show 0 kB when
284                 # some small file still needs to be fetched.
285                 mysize += 1024 - mysize % 1024
286         mystr=str(mysize/1024)
287         mycount=len(mystr)
288         while (mycount > 3):
289                 mycount-=3
290                 mystr=mystr[:mycount]+","+mystr[mycount:]
291         return mystr+" kB"
292
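# Illustrative results of format_size() above (doctest-style, hypothetical
# sizes): partial kilobytes are always rounded up so a pending fetch never
# shows as "0 kB".
#
#     >>> format_size(1)
#     '1 kB'
#     >>> format_size(2500 * 1024)
#     '2,500 kB'
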
293
294 def getgccversion(chost):
295         """
296         rtype: C{str}
297         return:  the current in-use gcc version
298         """
299
300         gcc_ver_command = 'gcc -dumpversion'
301         gcc_ver_prefix = 'gcc-'
302
303         gcc_not_found_error = red(
304         "!!! No gcc found. You probably need to 'source /etc/profile'\n" +
305         "!!! to update the environment of this terminal and possibly\n" +
306         "!!! other terminals also.\n"
307         )
308
309         mystatus, myoutput = commands.getstatusoutput("gcc-config -c")
310         if mystatus == os.EX_OK and myoutput.startswith(chost + "-"):
311                 return myoutput.replace(chost + "-", gcc_ver_prefix, 1)
312
313         mystatus, myoutput = commands.getstatusoutput(
314                 chost + "-" + gcc_ver_command)
315         if mystatus == os.EX_OK:
316                 return gcc_ver_prefix + myoutput
317
318         mystatus, myoutput = commands.getstatusoutput(gcc_ver_command)
319         if mystatus == os.EX_OK:
320                 return gcc_ver_prefix + myoutput
321
322         portage.writemsg(gcc_not_found_error, noiselevel=-1)
323         return "[unavailable]"
324
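# Illustrative return values of getgccversion() above (hypothetical CHOST and
# compiler version, shown only to document the "gcc-<version>" format):
#
#     getgccversion("x86_64-pc-linux-gnu")  ->  "gcc-4.3.2"       # gcc found
#     getgccversion("x86_64-pc-linux-gnu")  ->  "[unavailable]"   # no gcc in PATH
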
325 def getportageversion(portdir, target_root, profile, chost, vardb):
326         profilever = "unavailable"
327         if profile:
328                 realpath = os.path.realpath(profile)
329                 basepath   = os.path.realpath(os.path.join(portdir, "profiles"))
330                 if realpath.startswith(basepath):
331                         profilever = realpath[1 + len(basepath):]
332                 else:
333                         try:
334                                 profilever = "!" + os.readlink(profile)
335                         except (OSError):
336                                 pass
337                 del realpath, basepath
338
339         libcver=[]
340         libclist  = vardb.match("virtual/libc")
341         libclist += vardb.match("virtual/glibc")
342         libclist  = portage.util.unique_array(libclist)
343         for x in libclist:
344                 xs=portage.catpkgsplit(x)
345                 if libcver:
346                         libcver+=","+"-".join(xs[1:])
347                 else:
348                         libcver="-".join(xs[1:])
349         if libcver==[]:
350                 libcver="unavailable"
351
352         gccver = getgccversion(chost)
353         unameout=platform.release()+" "+platform.machine()
354
355         return "Portage " + portage.VERSION +" ("+profilever+", "+gccver+", "+libcver+", "+unameout+")"
356
357 def create_depgraph_params(myopts, myaction):
358         #configure emerge engine parameters
359         #
360         # self:      include _this_ package regardless of whether it is merged.
361         # selective: exclude the package if it is merged
362         # recurse:   go into the dependencies
363         # deep:      go into the dependencies of already merged packages
364         # empty:     pretend nothing is merged
365         # complete:  completely account for all known dependencies
366         # remove:    build graph for use in removing packages
367         myparams = set(["recurse"])
368
369         if myaction == "remove":
370                 myparams.add("remove")
371                 myparams.add("complete")
372                 return myparams
373
374         if "--update" in myopts or \
375                 "--newuse" in myopts or \
376                 "--reinstall" in myopts or \
377                 "--noreplace" in myopts:
378                 myparams.add("selective")
379         if "--emptytree" in myopts:
380                 myparams.add("empty")
381                 myparams.discard("selective")
382         if "--nodeps" in myopts:
383                 myparams.discard("recurse")
384         if "--deep" in myopts:
385                 myparams.add("deep")
386         if "--complete-graph" in myopts:
387                 myparams.add("complete")
388         return myparams
389
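# Illustrative results of create_depgraph_params() above (doctest-style,
# hypothetical option dicts; not an exhaustive mapping):
#
#     >>> sorted(create_depgraph_params({"--update": True, "--deep": True}, "merge"))
#     ['deep', 'recurse', 'selective']
#     >>> sorted(create_depgraph_params({}, "remove"))
#     ['complete', 'recurse', 'remove']
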
390 # search functionality
391 class search(object):
392
393         #
394         # class constants
395         #
396         VERSION_SHORT=1
397         VERSION_RELEASE=2
398
399         #
400         # public interface
401         #
402         def __init__(self, root_config, spinner, searchdesc,
403                 verbose, usepkg, usepkgonly):
404                 """Searches the available and installed packages for the supplied search key.
405                 The list of available and installed packages is created at object instantiation.
406                 This makes successive searches faster."""
407                 self.settings = root_config.settings
408                 self.vartree = root_config.trees["vartree"]
409                 self.spinner = spinner
410                 self.verbose = verbose
411                 self.searchdesc = searchdesc
412                 self.root_config = root_config
413                 self.setconfig = root_config.setconfig
414                 self.matches = {"pkg" : []}
415                 self.mlen = 0
416
417                 def fake_portdb():
418                         pass
419                 self.portdb = fake_portdb
420                 for attrib in ("aux_get", "cp_all",
421                         "xmatch", "findname", "getFetchMap"):
422                         setattr(fake_portdb, attrib, getattr(self, "_"+attrib))
423
424                 self._dbs = []
425
426                 portdb = root_config.trees["porttree"].dbapi
427                 bindb = root_config.trees["bintree"].dbapi
428                 vardb = root_config.trees["vartree"].dbapi
429
430                 if not usepkgonly and portdb._have_root_eclass_dir:
431                         self._dbs.append(portdb)
432
433                 if (usepkg or usepkgonly) and bindb.cp_all():
434                         self._dbs.append(bindb)
435
436                 self._dbs.append(vardb)
437                 self._portdb = portdb
438
439         def _cp_all(self):
440                 cp_all = set()
441                 for db in self._dbs:
442                         cp_all.update(db.cp_all())
443                 return list(sorted(cp_all))
444
445         def _aux_get(self, *args, **kwargs):
446                 for db in self._dbs:
447                         try:
448                                 return db.aux_get(*args, **kwargs)
449                         except KeyError:
450                                 pass
451                 raise
452
453         def _findname(self, *args, **kwargs):
454                 for db in self._dbs:
455                         if db is not self._portdb:
456                                 # We don't want findname to return anything
457                                 # unless it's an ebuild in a portage tree.
458                                 # Otherwise, it's already built and we don't
459                                 # care about it.
460                                 continue
461                         func = getattr(db, "findname", None)
462                         if func:
463                                 value = func(*args, **kwargs)
464                                 if value:
465                                         return value
466                 return None
467
468         def _getFetchMap(self, *args, **kwargs):
469                 for db in self._dbs:
470                         func = getattr(db, "getFetchMap", None)
471                         if func:
472                                 value = func(*args, **kwargs)
473                                 if value:
474                                         return value
475                 return {}
476
477         def _visible(self, db, cpv, metadata):
478                 installed = db is self.vartree.dbapi
479                 built = installed or db is not self._portdb
480                 pkg_type = "ebuild"
481                 if installed:
482                         pkg_type = "installed"
483                 elif built:
484                         pkg_type = "binary"
485                 return visible(self.settings,
486                         Package(type_name=pkg_type, root_config=self.root_config,
487                         cpv=cpv, built=built, installed=installed, metadata=metadata))
488
489         def _xmatch(self, level, atom):
490                 """
491                 This method does not expand old-style virtuals because it
492                 is restricted to returning matches for a single ${CATEGORY}/${PN}
493                 and old-style virtual matches are unreliable for that when querying
494                 multiple package databases. If necessary, old-style virtual expansion
495                 can be performed on atoms prior to calling this method.
496                 """
497                 cp = portage.dep_getkey(atom)
498                 if level == "match-all":
499                         matches = set()
500                         for db in self._dbs:
501                                 if hasattr(db, "xmatch"):
502                                         matches.update(db.xmatch(level, atom))
503                                 else:
504                                         matches.update(db.match(atom))
505                         result = list(x for x in matches if portage.cpv_getkey(x) == cp)
506                         db._cpv_sort_ascending(result)
507                 elif level == "match-visible":
508                         matches = set()
509                         for db in self._dbs:
510                                 if hasattr(db, "xmatch"):
511                                         matches.update(db.xmatch(level, atom))
512                                 else:
513                                         db_keys = list(db._aux_cache_keys)
514                                         for cpv in db.match(atom):
515                                                 metadata = izip(db_keys,
516                                                         db.aux_get(cpv, db_keys))
517                                                 if not self._visible(db, cpv, metadata):
518                                                         continue
519                                                 matches.add(cpv)
520                         result = list(x for x in matches if portage.cpv_getkey(x) == cp)
521                         db._cpv_sort_ascending(result)
522                 elif level == "bestmatch-visible":
523                         result = None
524                         for db in self._dbs:
525                                 if hasattr(db, "xmatch"):
526                                         cpv = db.xmatch("bestmatch-visible", atom)
527                                         if not cpv or portage.cpv_getkey(cpv) != cp:
528                                                 continue
529                                         if not result or cpv == portage.best([cpv, result]):
530                                                 result = cpv
531                                 else:
532                                         db_keys = Package.metadata_keys
533                                         # break out of this loop with highest visible
534                                         # match, checked in descending order
535                                         for cpv in reversed(db.match(atom)):
536                                                 if portage.cpv_getkey(cpv) != cp:
537                                                         continue
538                                                 metadata = izip(db_keys,
539                                                         db.aux_get(cpv, db_keys))
540                                                 if not self._visible(db, cpv, metadata):
541                                                         continue
542                                                 if not result or cpv == portage.best([cpv, result]):
543                                                         result = cpv
544                                                 break
545                 else:
546                         raise NotImplementedError(level)
547                 return result
548
549         def execute(self,searchkey):
550                 """Performs the search for the supplied search key"""
551                 match_category = 0
552                 self.searchkey=searchkey
553                 self.packagematches = []
554                 if self.searchdesc:
555                         self.searchdesc=1
556                         self.matches = {"pkg":[], "desc":[], "set":[]}
557                 else:
558                         self.searchdesc=0
559                         self.matches = {"pkg":[], "set":[]}
560                 print "Searching...   ",
561
562                 regexsearch = False
563                 if self.searchkey.startswith('%'):
564                         regexsearch = True
565                         self.searchkey = self.searchkey[1:]
566                 if self.searchkey.startswith('@'):
567                         match_category = 1
568                         self.searchkey = self.searchkey[1:]
569                 if regexsearch:
570                         self.searchre=re.compile(self.searchkey,re.I)
571                 else:
572                         self.searchre=re.compile(re.escape(self.searchkey), re.I)
573                 for package in self.portdb.cp_all():
574                         self.spinner.update()
575
576                         if match_category:
577                                 match_string  = package[:]
578                         else:
579                                 match_string  = package.split("/")[-1]
580
581                         masked=0
582                         if self.searchre.search(match_string):
583                                 if not self.portdb.xmatch("match-visible", package):
584                                         masked=1
585                                 self.matches["pkg"].append([package,masked])
586                         elif self.searchdesc: # DESCRIPTION searching
587                                 full_package = self.portdb.xmatch("bestmatch-visible", package)
588                                 if not full_package:
589                                         #no match found; we don't want to query description
590                                         full_package = portage.best(
591                                                 self.portdb.xmatch("match-all", package))
592                                         if not full_package:
593                                                 continue
594                                         else:
595                                                 masked=1
596                                 try:
597                                         full_desc = self.portdb.aux_get(
598                                                 full_package, ["DESCRIPTION"])[0]
599                                 except KeyError:
600                                         print "emerge: search: aux_get() failed, skipping"
601                                         continue
602                                 if self.searchre.search(full_desc):
603                                         self.matches["desc"].append([full_package,masked])
604
605                 self.sdict = self.setconfig.getSets()
606                 for setname in self.sdict:
607                         self.spinner.update()
608                         if match_category:
609                                 match_string = setname
610                         else:
611                                 match_string = setname.split("/")[-1]
612                         
613                         if self.searchre.search(match_string):
614                                 self.matches["set"].append([setname, False])
615                         elif self.searchdesc:
616                                 if self.searchre.search(
617                                         self.sdict[setname].getMetadata("DESCRIPTION")):
618                                         self.matches["set"].append([setname, False])
619                         
620                 self.mlen=0
621                 for mtype in self.matches:
622                         self.matches[mtype].sort()
623                         self.mlen += len(self.matches[mtype])
624
625         def addCP(self, cp):
626                 if not self.portdb.xmatch("match-all", cp):
627                         return
628                 masked = 0
629                 if not self.portdb.xmatch("bestmatch-visible", cp):
630                         masked = 1
631                 self.matches["pkg"].append([cp, masked])
632                 self.mlen += 1
633
634         def output(self):
635                 """Outputs the results of the search."""
636                 print "\b\b  \n[ Results for search key : "+white(self.searchkey)+" ]"
637                 print "[ Applications found : "+white(str(self.mlen))+" ]"
638                 print " "
639                 vardb = self.vartree.dbapi
640                 for mtype in self.matches:
641                         for match,masked in self.matches[mtype]:
642                                 full_package = None
643                                 if mtype == "pkg":
644                                         catpack = match
645                                         full_package = self.portdb.xmatch(
646                                                 "bestmatch-visible", match)
647                                         if not full_package:
648                                                 #no match found; we don't want to query description
649                                                 masked=1
650                                                 full_package = portage.best(
651                                                         self.portdb.xmatch("match-all",match))
652                                 elif mtype == "desc":
653                                         full_package = match
654                                         match        = portage.cpv_getkey(match)
655                                 elif mtype == "set":
656                                         print green("*")+"  "+white(match)
657                                         print "     ", darkgreen("Description:")+"  ", self.sdict[match].getMetadata("DESCRIPTION")
658                                         print
659                                 if full_package:
660                                         try:
661                                                 desc, homepage, license = self.portdb.aux_get(
662                                                         full_package, ["DESCRIPTION","HOMEPAGE","LICENSE"])
663                                         except KeyError:
664                                                 print "emerge: search: aux_get() failed, skipping"
665                                                 continue
666                                         if masked:
667                                                 print green("*")+"  "+white(match)+" "+red("[ Masked ]")
668                                         else:
669                                                 print green("*")+"  "+white(match)
670                                         myversion = self.getVersion(full_package, search.VERSION_RELEASE)
671
672                                         mysum = [0,0]
673                                         file_size_str = None
674                                         mycat = match.split("/")[0]
675                                         mypkg = match.split("/")[1]
676                                         mycpv = match + "-" + myversion
677                                         myebuild = self.portdb.findname(mycpv)
678                                         if myebuild:
679                                                 pkgdir = os.path.dirname(myebuild)
680                                                 from portage import manifest
681                                                 mf = manifest.Manifest(
682                                                         pkgdir, self.settings["DISTDIR"])
683                                                 try:
684                                                         uri_map = self.portdb.getFetchMap(mycpv)
685                                                 except portage.exception.InvalidDependString, e:
686                                                         file_size_str = "Unknown (%s)" % (e,)
687                                                         del e
688                                                 else:
689                                                         try:
690                                                                 mysum[0] = mf.getDistfilesSize(uri_map)
691                                                         except KeyError, e:
692                                                                 file_size_str = "Unknown (missing " + \
693                                                                         "digest for %s)" % (e,)
694                                                                 del e
695
696                                         available = False
697                                         for db in self._dbs:
698                                                 if db is not vardb and \
699                                                         db.cpv_exists(mycpv):
700                                                         available = True
701                                                         if not myebuild and hasattr(db, "bintree"):
702                                                                 myebuild = db.bintree.getname(mycpv)
703                                                                 try:
704                                                                         mysum[0] = os.stat(myebuild).st_size
705                                                                 except OSError:
706                                                                         myebuild = None
707                                                         break
708
709                                         if myebuild and file_size_str is None:
710                                                 mystr = str(mysum[0] / 1024)
711                                                 mycount = len(mystr)
712                                                 while (mycount > 3):
713                                                         mycount -= 3
714                                                         mystr = mystr[:mycount] + "," + mystr[mycount:]
715                                                 file_size_str = mystr + " kB"
716
717                                         if self.verbose:
718                                                 if available:
719                                                         print "     ", darkgreen("Latest version available:"),myversion
720                                                 print "     ", self.getInstallationStatus(mycat+'/'+mypkg)
721                                                 if myebuild:
722                                                         print "      %s %s" % \
723                                                                 (darkgreen("Size of files:"), file_size_str)
724                                                 print "     ", darkgreen("Homepage:")+"     ",homepage
725                                                 print "     ", darkgreen("Description:")+"  ",desc
726                                                 print "     ", darkgreen("License:")+"      ",license
727                                                 print
728         #
729         # private interface
730         #
731         def getInstallationStatus(self,package):
732                 installed_package = self.vartree.dep_bestmatch(package)
733                 result = ""
734                 version = self.getVersion(installed_package,search.VERSION_RELEASE)
735                 if len(version) > 0:
736                         result = darkgreen("Latest version installed:")+" "+version
737                 else:
738                         result = darkgreen("Latest version installed:")+" [ Not Installed ]"
739                 return result
740
741         def getVersion(self,full_package,detail):
742                 if len(full_package) > 1:
743                         package_parts = portage.catpkgsplit(full_package)
744                         if detail == search.VERSION_RELEASE and package_parts[3] != 'r0':
745                                 result = package_parts[2]+ "-" + package_parts[3]
746                         else:
747                                 result = package_parts[2]
748                 else:
749                         result = ""
750                 return result
751
752 class RootConfig(object):
753         """This is used internally by depgraph to track information about a
754         particular $ROOT."""
755
756         pkg_tree_map = {
757                 "ebuild"    : "porttree",
758                 "binary"    : "bintree",
759                 "installed" : "vartree"
760         }
761
762         tree_pkg_map = {}
763         for k, v in pkg_tree_map.iteritems():
764                 tree_pkg_map[v] = k
765
766         def __init__(self, settings, trees, setconfig):
767                 self.trees = trees
768                 self.settings = settings
769                 self.iuse_implicit = tuple(sorted(settings._get_implicit_iuse()))
770                 self.root = self.settings["ROOT"]
771                 self.setconfig = setconfig
772                 self.sets = self.setconfig.getSets()
773                 self.visible_pkgs = PackageVirtualDbapi(self.settings)
774
775 def create_world_atom(pkg, args_set, root_config):
776         """Create a new atom for the world file if one does not exist.  If the
777         argument atom is precise enough to identify a specific slot then a slot
778         atom will be returned. Atoms that are in the system set may also be stored
779         in world since system atoms can only match one slot while world atoms can
780         be greedy with respect to slots.  Unslotted system packages will not be
781         stored in world."""
782
783         arg_atom = args_set.findAtomForPackage(pkg)
784         if not arg_atom:
785                 return None
786         cp = portage.dep_getkey(arg_atom)
787         new_world_atom = cp
788         sets = root_config.sets
789         portdb = root_config.trees["porttree"].dbapi
790         vardb = root_config.trees["vartree"].dbapi
791         available_slots = set(portdb.aux_get(cpv, ["SLOT"])[0] \
792                 for cpv in portdb.match(cp))
793         slotted = len(available_slots) > 1 or \
794                 (len(available_slots) == 1 and "0" not in available_slots)
795         if not slotted:
796                 # check the vdb in case this is multislot
797                 available_slots = set(vardb.aux_get(cpv, ["SLOT"])[0] \
798                         for cpv in vardb.match(cp))
799                 slotted = len(available_slots) > 1 or \
800                         (len(available_slots) == 1 and "0" not in available_slots)
801         if slotted and arg_atom != cp:
802                 # If the user gave a specific atom, store it as a
803                 # slot atom in the world file.
804                 slot_atom = pkg.slot_atom
805
806                 # For USE=multislot, there are a couple of cases to
807                 # handle here:
808                 #
809                 # 1) SLOT="0", but the real SLOT spontaneously changed to some
810                 #    unknown value, so just record an unslotted atom.
811                 #
812                 # 2) SLOT comes from an installed package and there is no
813                 #    matching SLOT in the portage tree.
814                 #
815                 # Make sure that the slot atom is available in either the
816                 # portdb or the vardb, since otherwise the user certainly
817                 # doesn't want the SLOT atom recorded in the world file
818                 # (case 1 above).  If it's only available in the vardb,
819                 # the user may be trying to prevent a USE=multislot
820                 # package from being removed by --depclean (case 2 above).
821
822                 mydb = portdb
823                 if not portdb.match(slot_atom):
824                         # SLOT seems to come from an installed multislot package
825                         mydb = vardb
826                 # If there is no installed package matching the SLOT atom,
827                 # it probably changed SLOT spontaneously due to USE=multislot,
828                 # so just record an unslotted atom.
829                 if vardb.match(slot_atom):
830                         # Now verify that the argument is precise
831                         # enough to identify a specific slot.
832                         matches = mydb.match(arg_atom)
833                         matched_slots = set()
834                         for cpv in matches:
835                                 matched_slots.add(mydb.aux_get(cpv, ["SLOT"])[0])
836                         if len(matched_slots) == 1:
837                                 new_world_atom = slot_atom
838
839         if new_world_atom == sets["world"].findAtomForPackage(pkg):
840                 # Both atoms would be identical, so there's nothing to add.
841                 return None
842         if not slotted:
843                 # Unlike world atoms, system atoms are not greedy for slots, so they
844                 # can't be safely excluded from world if they are slotted.
845                 system_atom = sets["system"].findAtomForPackage(pkg)
846                 if system_atom:
847                         if not portage.dep_getkey(system_atom).startswith("virtual/"):
848                                 return None
849                         # System virtuals aren't safe to exclude from world since they can
850                         # match multiple old-style virtuals but only one of them will be
851                         # pulled in by update or depclean.
852                         providers = portdb.mysettings.getvirtuals().get(
853                                 portage.dep_getkey(system_atom))
854                         if providers and len(providers) == 1 and providers[0] == cp:
855                                 return None
856         return new_world_atom
857
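# Illustrative outcomes of create_world_atom() above (hypothetical packages,
# a sketch of the intent rather than exact output):
#
#     create_world_atom(gcc_4_3_pkg, args_set, root_config)
#             -> "sys-devel/gcc:4.3"   # slotted; precise argument atom given
#     create_world_atom(bash_pkg, args_set, root_config)
#             -> None                  # unslotted system package, kept out of world
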
858 def filter_iuse_defaults(iuse):
859         for flag in iuse:
860                 if flag.startswith("+") or flag.startswith("-"):
861                         yield flag[1:]
862                 else:
863                         yield flag
864
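# Example of filter_iuse_defaults() above (hypothetical IUSE values):
#
#     list(filter_iuse_defaults(["+ipv6", "-berkdb", "doc"]))
#             -> ["ipv6", "berkdb", "doc"]
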
865 class SlotObject(object):
866         __slots__ = ("__weakref__",)
867
868         def __init__(self, **kwargs):
869                 classes = [self.__class__]
870                 while classes:
871                         c = classes.pop()
872                         if c is SlotObject:
873                                 continue
874                         classes.extend(c.__bases__)
875                         slots = getattr(c, "__slots__", None)
876                         if not slots:
877                                 continue
878                         for myattr in slots:
879                                 myvalue = kwargs.get(myattr, None)
880                                 setattr(self, myattr, myvalue)
881
882         def copy(self):
883                 """
884                 Create a new instance and copy all attributes
885                 defined from __slots__ (including those from
886                 inherited classes).
887                 """
888                 obj = self.__class__()
889
890                 classes = [self.__class__]
891                 while classes:
892                         c = classes.pop()
893                         if c is SlotObject:
894                                 continue
895                         classes.extend(c.__bases__)
896                         slots = getattr(c, "__slots__", None)
897                         if not slots:
898                                 continue
899                         for myattr in slots:
900                                 setattr(obj, myattr, getattr(self, myattr))
901
902                 return obj
903
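# A minimal sketch of how SlotObject is meant to be subclassed (hypothetical
# class, not part of this module): every name in __slots__ becomes a keyword
# argument that defaults to None, and copy() duplicates all slots.
#
#     class _Example(SlotObject):
#             __slots__ = ("name", "value")
#
#     e = _Example(name="foo")    # e.value is None
#     e2 = e.copy()               # e2.name == "foo", e2.value is None
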
904 class AbstractDepPriority(SlotObject):
905         __slots__ = ("buildtime", "runtime", "runtime_post")
906
907         def __lt__(self, other):
908                 return self.__int__() < other
909
910         def __le__(self, other):
911                 return self.__int__() <= other
912
913         def __eq__(self, other):
914                 return self.__int__() == other
915
916         def __ne__(self, other):
917                 return self.__int__() != other
918
919         def __gt__(self, other):
920                 return self.__int__() > other
921
922         def __ge__(self, other):
923                 return self.__int__() >= other
924
925         def copy(self):
926                 import copy
927                 return copy.copy(self)
928
929 class DepPriority(AbstractDepPriority):
930
931         __slots__ = ("satisfied", "optional", "rebuild")
932
933         def __int__(self):
934                 return 0
935
936         def __str__(self):
937                 if self.optional:
938                         return "optional"
939                 if self.buildtime:
940                         return "buildtime"
941                 if self.runtime:
942                         return "runtime"
943                 if self.runtime_post:
944                         return "runtime_post"
945                 return "soft"
946
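# Illustrative DepPriority behavior (hypothetical instances): all DepPriority
# objects compare as 0, while __str__ reports the strongest set property.
#
#     str(DepPriority(buildtime=True))   ->  "buildtime"
#     str(DepPriority())                 ->  "soft"
#     DepPriority(runtime=True) <= 0     ->  True
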
947 class BlockerDepPriority(DepPriority):
948         __slots__ = ()
949         def __int__(self):
950                 return 0
951
952         def __str__(self):
953                 return 'blocker'
954
955 BlockerDepPriority.instance = BlockerDepPriority()
956
957 class UnmergeDepPriority(AbstractDepPriority):
958         __slots__ = ("optional", "satisfied",)
959         """
960         Combination of properties           Priority  Category
961
962         runtime                                0       HARD
963         runtime_post                          -1       HARD
964         buildtime                             -2       SOFT
965         (none of the above)                   -2       SOFT
966         """
967
968         MAX    =  0
969         SOFT   = -2
970         MIN    = -2
971
972         def __int__(self):
973                 if self.runtime:
974                         return 0
975                 if self.runtime_post:
976                         return -1
977                 if self.buildtime:
978                         return -2
979                 return -2
980
981         def __str__(self):
982                 myvalue = self.__int__()
983                 if myvalue > self.SOFT:
984                         return "hard"
985                 return "soft"
986
987 class DepPriorityNormalRange(object):
988         """
989         DepPriority properties              Index      Category
990
991         buildtime                                      HARD
992         runtime                                3       MEDIUM
993         runtime_post                           2       MEDIUM_SOFT
994         optional                               1       SOFT
995         (none of the above)                    0       NONE
996         """
997         MEDIUM      = 3
998         MEDIUM_SOFT = 2
999         SOFT        = 1
1000         NONE        = 0
1001
1002         @classmethod
1003         def _ignore_optional(cls, priority):
1004                 if priority.__class__ is not DepPriority:
1005                         return False
1006                 return bool(priority.optional)
1007
1008         @classmethod
1009         def _ignore_runtime_post(cls, priority):
1010                 if priority.__class__ is not DepPriority:
1011                         return False
1012                 return bool(priority.optional or priority.runtime_post)
1013
1014         @classmethod
1015         def _ignore_runtime(cls, priority):
1016                 if priority.__class__ is not DepPriority:
1017                         return False
1018                 return not priority.buildtime
1019
1020         ignore_medium      = _ignore_runtime
1021         ignore_medium_soft = _ignore_runtime_post
1022         ignore_soft        = _ignore_optional
1023
1024 DepPriorityNormalRange.ignore_priority = (
1025         None,
1026         DepPriorityNormalRange._ignore_optional,
1027         DepPriorityNormalRange._ignore_runtime_post,
1028         DepPriorityNormalRange._ignore_runtime
1029 )
1030
1031 class DepPrioritySatisfiedRange(object):
1032         """
1033         DepPriority                         Index      Category
1034
1035         not satisfied and buildtime                    HARD
1036         not satisfied and runtime              7       MEDIUM
1037         not satisfied and runtime_post         6       MEDIUM_SOFT
1038         satisfied and buildtime and rebuild    5       SOFT
1039         satisfied and buildtime                4       SOFT
1040         satisfied and runtime                  3       SOFT
1041         satisfied and runtime_post             2       SOFT
1042         optional                               1       SOFT
1043         (none of the above)                    0       NONE
1044         """
1045         MEDIUM      = 7
1046         MEDIUM_SOFT = 6
1047         SOFT        = 5
1048         NONE        = 0
1049
1050         @classmethod
1051         def _ignore_optional(cls, priority):
1052                 if priority.__class__ is not DepPriority:
1053                         return False
1054                 return bool(priority.optional)
1055
1056         @classmethod
1057         def _ignore_satisfied_runtime_post(cls, priority):
1058                 if priority.__class__ is not DepPriority:
1059                         return False
1060                 if priority.optional:
1061                         return True
1062                 if not priority.satisfied:
1063                         return False
1064                 return bool(priority.runtime_post)
1065
1066         @classmethod
1067         def _ignore_satisfied_runtime(cls, priority):
1068                 if priority.__class__ is not DepPriority:
1069                         return False
1070                 if priority.optional:
1071                         return True
1072                 if not priority.satisfied:
1073                         return False
1074                 return not priority.buildtime
1075
1076         @classmethod
1077         def _ignore_satisfied_buildtime(cls, priority):
1078                 if priority.__class__ is not DepPriority:
1079                         return False
1080                 if priority.optional:
1081                         return True
1082                 if not priority.satisfied:
1083                         return False
1084                 if priority.buildtime:
1085                         return not priority.rebuild
1086                 return True
1087
1088         @classmethod
1089         def _ignore_satisfied_buildtime_rebuild(cls, priority):
1090                 if priority.__class__ is not DepPriority:
1091                         return False
1092                 if priority.optional:
1093                         return True
1094                 return bool(priority.satisfied)
1095
1096         @classmethod
1097         def _ignore_runtime_post(cls, priority):
1098                 if priority.__class__ is not DepPriority:
1099                         return False
1100                 return bool(priority.optional or \
1101                         priority.satisfied or \
1102                         priority.runtime_post)
1103
1104         @classmethod
1105         def _ignore_runtime(cls, priority):
1106                 if priority.__class__ is not DepPriority:
1107                         return False
1108                 return bool(priority.satisfied or \
1109                         not priority.buildtime)
1110
1111         ignore_medium      = _ignore_runtime
1112         ignore_medium_soft = _ignore_runtime_post
1113         ignore_soft        = _ignore_satisfied_buildtime_rebuild
1114
1115 DepPrioritySatisfiedRange.ignore_priority = (
1116         None,
1117         DepPrioritySatisfiedRange._ignore_optional,
1118         DepPrioritySatisfiedRange._ignore_satisfied_runtime_post,
1119         DepPrioritySatisfiedRange._ignore_satisfied_runtime,
1120         DepPrioritySatisfiedRange._ignore_satisfied_buildtime,
1121         DepPrioritySatisfiedRange._ignore_satisfied_buildtime_rebuild,
1122         DepPrioritySatisfiedRange._ignore_runtime_post,
1123         DepPrioritySatisfiedRange._ignore_runtime
1124 )
1125
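# A sketch of how these ignore_priority ranges are typically consumed
# (hypothetical loop, not code from this module; mygraph is assumed to be a
# digraph instance): callers walk the tuple from weakest to strongest filter,
# asking the graph for leaf nodes while ignoring progressively harder
# dependency edges, and stop at the first filter that yields results.
#
#     for ignore_priority in DepPrioritySatisfiedRange.ignore_priority:
#             leaves = mygraph.leaf_nodes(ignore_priority=ignore_priority)
#             if leaves:
#                     break
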
1126 def _find_deep_system_runtime_deps(graph):
1127         deep_system_deps = set()
1128         node_stack = []
1129         for node in graph:
1130                 if not isinstance(node, Package) or \
1131                         node.operation == 'uninstall':
1132                         continue
1133                 if node.root_config.sets['system'].findAtomForPackage(node):
1134                         node_stack.append(node)
1135
1136         def ignore_priority(priority):
1137                 """
1138                 Ignore non-runtime priorities.
1139                 """
1140                 if isinstance(priority, DepPriority) and \
1141                         (priority.runtime or priority.runtime_post):
1142                         return False
1143                 return True
1144
1145         while node_stack:
1146                 node = node_stack.pop()
1147                 if node in deep_system_deps:
1148                         continue
1149                 deep_system_deps.add(node)
1150                 for child in graph.child_nodes(node, ignore_priority=ignore_priority):
1151                         if not isinstance(child, Package) or \
1152                                 child.operation == 'uninstall':
1153                                 continue
1154                         node_stack.append(child)
1155
1156         return deep_system_deps
1157
1158 class FakeVartree(portage.vartree):
1159         """This implements an in-memory copy of a vartree instance that provides
1160         all the interfaces required for use by the depgraph.  The vardb is locked
1161         during the constructor call just long enough to read a copy of the
1162         installed package information.  This allows the depgraph to do its
1163         dependency calculations without holding a lock on the vardb.  It also
1164         allows things like vardb global updates to be done in memory so that the
1165         user doesn't necessarily need write access to the vardb in cases where
1166         global updates are necessary (updates are performed when necessary if there
1167         is not a matching ebuild in the tree)."""
1168         def __init__(self, root_config, pkg_cache=None, acquire_lock=1):
1169                 self._root_config = root_config
1170                 if pkg_cache is None:
1171                         pkg_cache = {}
1172                 real_vartree = root_config.trees["vartree"]
1173                 portdb = root_config.trees["porttree"].dbapi
1174                 self.root = real_vartree.root
1175                 self.settings = real_vartree.settings
1176                 mykeys = list(real_vartree.dbapi._aux_cache_keys)
1177                 if "_mtime_" not in mykeys:
1178                         mykeys.append("_mtime_")
1179                 self._db_keys = mykeys
1180                 self._pkg_cache = pkg_cache
1181                 self.dbapi = PackageVirtualDbapi(real_vartree.settings)
1182                 vdb_path = os.path.join(self.root, portage.VDB_PATH)
1183                 try:
1184                         # At least the parent needs to exist for the lock file.
1185                         portage.util.ensure_dirs(vdb_path)
1186                 except portage.exception.PortageException:
1187                         pass
1188                 vdb_lock = None
1189                 try:
1190                         if acquire_lock and os.access(vdb_path, os.W_OK):
1191                                 vdb_lock = portage.locks.lockdir(vdb_path)
1192                         real_dbapi = real_vartree.dbapi
1193                         slot_counters = {}
1194                         for cpv in real_dbapi.cpv_all():
1195                                 cache_key = ("installed", self.root, cpv, "nomerge")
1196                                 pkg = self._pkg_cache.get(cache_key)
1197                                 if pkg is not None:
1198                                         metadata = pkg.metadata
1199                                 else:
1200                                         metadata = dict(izip(mykeys, real_dbapi.aux_get(cpv, mykeys)))
1201                                 myslot = metadata["SLOT"]
1202                                 mycp = portage.dep_getkey(cpv)
1203                                 myslot_atom = "%s:%s" % (mycp, myslot)
1204                                 try:
1205                                         mycounter = long(metadata["COUNTER"])
1206                                 except ValueError:
1207                                         mycounter = 0
1208                                         metadata["COUNTER"] = str(mycounter)
1209                                 other_counter = slot_counters.get(myslot_atom, None)
1210                                 if other_counter is not None:
1211                                         if other_counter > mycounter:
1212                                                 continue
1213                                 slot_counters[myslot_atom] = mycounter
1214                                 if pkg is None:
1215                                         pkg = Package(built=True, cpv=cpv,
1216                                                 installed=True, metadata=metadata,
1217                                                 root_config=root_config, type_name="installed")
1218                                 self._pkg_cache[pkg] = pkg
1219                                 self.dbapi.cpv_inject(pkg)
1220                         real_dbapi.flush_cache()
1221                 finally:
1222                         if vdb_lock:
1223                                 portage.locks.unlockdir(vdb_lock)
1224                 # Populate the old-style virtuals using the cached values.
1225                 if not self.settings.treeVirtuals:
1226                         self.settings.treeVirtuals = portage.util.map_dictlist_vals(
1227                                 portage.getCPFromCPV, self.get_all_provides())
1228
1229                 # Initialize variables needed for lazy cache pulls of the live ebuild
1230                 # metadata.  This ensures that the vardb lock is released ASAP, without
1231                 # being delayed in case cache generation is triggered.
1232                 self._aux_get = self.dbapi.aux_get
1233                 self.dbapi.aux_get = self._aux_get_wrapper
1234                 self._match = self.dbapi.match
1235                 self.dbapi.match = self._match_wrapper
1236                 self._aux_get_history = set()
1237                 self._portdb_keys = ["EAPI", "DEPEND", "RDEPEND", "PDEPEND"]
1238                 self._portdb = portdb
1239                 self._global_updates = None
1240
1241         def _match_wrapper(self, cpv, use_cache=1):
1242                 """
1243                 Make sure the metadata in Package instances gets updated for any
1244                 cpv that is returned from a match() call, since the metadata can
1245                 be accessed directly from the Package instance instead of via
1246                 aux_get().
1247                 """
1248                 matches = self._match(cpv, use_cache=use_cache)
1249                 for cpv in matches:
1250                         if cpv in self._aux_get_history:
1251                                 continue
1252                         self._aux_get_wrapper(cpv, [])
1253                 return matches
1254
1255         def _aux_get_wrapper(self, pkg, wants):
1256                 if pkg in self._aux_get_history:
1257                         return self._aux_get(pkg, wants)
1258                 self._aux_get_history.add(pkg)
1259                 try:
1260                         # Use the live ebuild metadata if possible.
1261                         live_metadata = dict(izip(self._portdb_keys,
1262                                 self._portdb.aux_get(pkg, self._portdb_keys)))
1263                         if not portage.eapi_is_supported(live_metadata["EAPI"]):
1264                                 raise KeyError(pkg)
1265                         self.dbapi.aux_update(pkg, live_metadata)
1266                 except (KeyError, portage.exception.PortageException):
1267                         if self._global_updates is None:
1268                                 self._global_updates = \
1269                                         grab_global_updates(self._portdb.porttree_root)
1270                         perform_global_updates(
1271                                 pkg, self.dbapi, self._global_updates)
1272                 return self._aux_get(pkg, wants)
1273
1274         def sync(self, acquire_lock=1):
1275                 """
1276                 Call this method to synchronize state with the real vardb
1277                 after one or more packages may have been installed or
1278                 uninstalled.
1279                 """
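                # Example (illustrative only, not from the original source):
                # after merging or unmerging packages, a caller refreshes this
                # instance with
                #       fake_vartree.sync()
                # where fake_vartree is a hypothetical FakeVartree instance.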
1280                 vdb_path = os.path.join(self.root, portage.VDB_PATH)
1281                 try:
1282                         # At least the parent needs to exist for the lock file.
1283                         portage.util.ensure_dirs(vdb_path)
1284                 except portage.exception.PortageException:
1285                         pass
1286                 vdb_lock = None
1287                 try:
1288                         if acquire_lock and os.access(vdb_path, os.W_OK):
1289                                 vdb_lock = portage.locks.lockdir(vdb_path)
1290                         self._sync()
1291                 finally:
1292                         if vdb_lock:
1293                                 portage.locks.unlockdir(vdb_lock)
1294
1295         def _sync(self):
1296
1297                 real_vardb = self._root_config.trees["vartree"].dbapi
1298                 current_cpv_set = frozenset(real_vardb.cpv_all())
1299                 pkg_vardb = self.dbapi
1300                 aux_get_history = self._aux_get_history
1301
1302                 # Remove any packages that have been uninstalled.
1303                 for pkg in list(pkg_vardb):
1304                         if pkg.cpv not in current_cpv_set:
1305                                 pkg_vardb.cpv_remove(pkg)
1306                                 aux_get_history.discard(pkg.cpv)
1307
1308                 # Validate counters and timestamps.
1309                 slot_counters = {}
1310                 root = self.root
1311                 validation_keys = ["COUNTER", "_mtime_"]
1312                 for cpv in current_cpv_set:
1313
1314                         pkg_hash_key = ("installed", root, cpv, "nomerge")
1315                         pkg = pkg_vardb.get(pkg_hash_key)
1316                         if pkg is not None:
1317                                 counter, mtime = real_vardb.aux_get(cpv, validation_keys)
1318                                 try:
1319                                         counter = long(counter)
1320                                 except ValueError:
1321                                         counter = 0
1322
1323                                 if counter != pkg.counter or \
1324                                         mtime != pkg.mtime:
1325                                         pkg_vardb.cpv_remove(pkg)
1326                                         aux_get_history.discard(pkg.cpv)
1327                                         pkg = None
1328
1329                         if pkg is None:
1330                                 pkg = self._pkg(cpv)
1331
1332                         other_counter = slot_counters.get(pkg.slot_atom)
1333                         if other_counter is not None:
1334                                 if other_counter > pkg.counter:
1335                                         continue
1336
1337                         slot_counters[pkg.slot_atom] = pkg.counter
1338                         pkg_vardb.cpv_inject(pkg)
1339
1340                 real_vardb.flush_cache()
1341
1342         def _pkg(self, cpv):
1343                 root_config = self._root_config
1344                 real_vardb = root_config.trees["vartree"].dbapi
1345                 pkg = Package(cpv=cpv, installed=True,
1346                         metadata=izip(self._db_keys,
1347                         real_vardb.aux_get(cpv, self._db_keys)),
1348                         root_config=root_config,
1349                         type_name="installed")
1350
1351                 try:
1352                         mycounter = long(pkg.metadata["COUNTER"])
1353                 except ValueError:
1354                         mycounter = 0
1355                         pkg.metadata["COUNTER"] = str(mycounter)
1356
1357                 return pkg
1358
1359 def grab_global_updates(portdir):
1360         from portage.update import grab_updates, parse_updates
1361         updpath = os.path.join(portdir, "profiles", "updates")
1362         try:
1363                 rawupdates = grab_updates(updpath)
1364         except portage.exception.DirectoryNotFound:
1365                 rawupdates = []
1366         upd_commands = []
1367         for mykey, mystat, mycontent in rawupdates:
1368                 commands, errors = parse_updates(mycontent)
1369                 upd_commands.extend(commands)
1370         return upd_commands
1371
1372 def perform_global_updates(mycpv, mydb, mycommands):
1373         from portage.update import update_dbentries
1374         aux_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
1375         aux_dict = dict(izip(aux_keys, mydb.aux_get(mycpv, aux_keys)))
1376         updates = update_dbentries(mycommands, aux_dict)
1377         if updates:
1378                 mydb.aux_update(mycpv, updates)
1379
1380 def visible(pkgsettings, pkg):
1381         """
1382         Check if a package is visible. An InvalidDependString exception from
1383         an invalid LICENSE is caught and treated as masked. (An illustrative
1384         call sketch follows this function.) TODO: optionally generate a list of masking reasons
1385         @rtype: Boolean
1386         @returns: True if the package is visible, False otherwise.
1387         """
1388         if not pkg.metadata["SLOT"]:
1389                 return False
1390         if pkg.built and not pkg.installed and "CHOST" in pkg.metadata:
1391                 if not pkgsettings._accept_chost(pkg):
1392                         return False
1393         eapi = pkg.metadata["EAPI"]
1394         if not portage.eapi_is_supported(eapi):
1395                 return False
1396         if not pkg.installed:
1397                 if portage._eapi_is_deprecated(eapi):
1398                         return False
1399                 if pkgsettings._getMissingKeywords(pkg.cpv, pkg.metadata):
1400                         return False
1401         if pkgsettings._getMaskAtom(pkg.cpv, pkg.metadata):
1402                 return False
1403         if pkgsettings._getProfileMaskAtom(pkg.cpv, pkg.metadata):
1404                 return False
1405         try:
1406                 if pkgsettings._getMissingLicenses(pkg.cpv, pkg.metadata):
1407                         return False
1408         except portage.exception.InvalidDependString:
1409                 return False
1410         return True
1411
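# Illustrative call sketch (not in the original source): visible() is typically
# called with a config instance and a Package built from db metadata, much as
# get_mask_info() does below. The names pkgsettings, root_config, portdb, cpv
# and db_keys here are hypothetical.
#
#       metadata = dict(izip(db_keys, portdb.aux_get(cpv, db_keys)))
#       pkg = Package(type_name="ebuild", root_config=root_config, cpv=cpv,
#               built=False, installed=False, metadata=metadata)
#       if visible(pkgsettings, pkg):
#               ...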
1412 def get_masking_status(pkg, pkgsettings, root_config):
1413
1414         mreasons = portage.getmaskingstatus(
1415                 pkg, settings=pkgsettings,
1416                 portdb=root_config.trees["porttree"].dbapi)
1417
1418         if pkg.built and not pkg.installed and "CHOST" in pkg.metadata:
1419                 if not pkgsettings._accept_chost(pkg):
1420                         mreasons.append("CHOST: %s" % \
1421                                 pkg.metadata["CHOST"])
1422
1423         if not pkg.metadata["SLOT"]:
1424                 mreasons.append("invalid: SLOT is undefined")
1425
1426         return mreasons
1427
1428 def get_mask_info(root_config, cpv, pkgsettings,
1429         db, pkg_type, built, installed, db_keys):
1430         eapi_masked = False
1431         try:
1432                 metadata = dict(izip(db_keys,
1433                         db.aux_get(cpv, db_keys)))
1434         except KeyError:
1435                 metadata = None
1436         if metadata and not built:
1437                 pkgsettings.setcpv(cpv, mydb=metadata)
1438                 metadata["USE"] = pkgsettings["PORTAGE_USE"]
1439         if metadata is None:
1440                 mreasons = ["corruption"]
1441         else:
1442                 pkg = Package(type_name=pkg_type, root_config=root_config,
1443                         cpv=cpv, built=built, installed=installed, metadata=metadata)
1444                 mreasons = get_masking_status(pkg, pkgsettings, root_config)
1445         return metadata, mreasons
1446
1447 def show_masked_packages(masked_packages):
1448         shown_licenses = set()
1449         shown_comments = set()
1450         # There may be both an ebuild and a binary for the same cpv. Only
1451         # show one of them to avoid a redundant listing.
1452         shown_cpvs = set()
1453         have_eapi_mask = False
1454         for (root_config, pkgsettings, cpv,
1455                 metadata, mreasons) in masked_packages:
1456                 if cpv in shown_cpvs:
1457                         continue
1458                 shown_cpvs.add(cpv)
1459                 comment, filename = None, None
1460                 if "package.mask" in mreasons:
1461                         comment, filename = \
1462                                 portage.getmaskingreason(
1463                                 cpv, metadata=metadata,
1464                                 settings=pkgsettings,
1465                                 portdb=root_config.trees["porttree"].dbapi,
1466                                 return_location=True)
1467                 missing_licenses = []
1468                 if metadata:
1469                         if not portage.eapi_is_supported(metadata["EAPI"]):
1470                                 have_eapi_mask = True
1471                         try:
1472                                 missing_licenses = \
1473                                         pkgsettings._getMissingLicenses(
1474                                                 cpv, metadata)
1475                         except portage.exception.InvalidDependString:
1476                                 # This will have already been reported
1477                                 # above via mreasons.
1478                                 pass
1479
1480                 print "- "+cpv+" (masked by: "+", ".join(mreasons)+")"
1481                 if comment and comment not in shown_comments:
1482                         print filename+":"
1483                         print comment
1484                         shown_comments.add(comment)
1485                 portdb = root_config.trees["porttree"].dbapi
1486                 for l in missing_licenses:
1487                         l_path = portdb.findLicensePath(l)
1488                         if l in shown_licenses:
1489                                 continue
1490                         msg = ("A copy of the '%s' license" + \
1491                         " is located at '%s'.") % (l, l_path)
1492                         print msg
1493                         print
1494                         shown_licenses.add(l)
1495         return have_eapi_mask
1496
1497 class Task(SlotObject):
1498         __slots__ = ("_hash_key", "_hash_value")
1499
1500         def _get_hash_key(self):
1501                 hash_key = getattr(self, "_hash_key", None)
1502                 if hash_key is None:
1503                         raise NotImplementedError(self)
1504                 return hash_key
1505
1506         def __eq__(self, other):
1507                 return self._get_hash_key() == other
1508
1509         def __ne__(self, other):
1510                 return self._get_hash_key() != other
1511
1512         def __hash__(self):
1513                 hash_value = getattr(self, "_hash_value", None)
1514                 if hash_value is None:
1515                         self._hash_value = hash(self._get_hash_key())
1516                 return self._hash_value
1517
1518         def __len__(self):
1519                 return len(self._get_hash_key())
1520
1521         def __getitem__(self, key):
1522                 return self._get_hash_key()[key]
1523
1524         def __iter__(self):
1525                 return iter(self._get_hash_key())
1526
1527         def __contains__(self, key):
1528                 return key in self._get_hash_key()
1529
1530         def __str__(self):
1531                 return str(self._get_hash_key())
1532
1533 class Blocker(Task):
1534
1535         __hash__ = Task.__hash__
1536         __slots__ = ("root", "atom", "cp", "eapi", "satisfied")
1537
1538         def __init__(self, **kwargs):
1539                 Task.__init__(self, **kwargs)
1540                 self.cp = portage.dep_getkey(self.atom)
1541
1542         def _get_hash_key(self):
1543                 hash_key = getattr(self, "_hash_key", None)
1544                 if hash_key is None:
1545                         self._hash_key = \
1546                                 ("blocks", self.root, self.atom, self.eapi)
1547                 return self._hash_key
1548
1549 class Package(Task):
1550
1551         __hash__ = Task.__hash__
1552         __slots__ = ("built", "cpv", "depth",
1553                 "installed", "metadata", "onlydeps", "operation",
1554                 "root_config", "type_name",
1555                 "category", "counter", "cp", "cpv_split",
1556                 "inherited", "iuse", "mtime",
1557                 "pf", "pv_split", "root", "slot", "slot_atom", "use")
1558
1559         metadata_keys = [
1560                 "CHOST", "COUNTER", "DEPEND", "EAPI",
1561                 "INHERITED", "IUSE", "KEYWORDS",
1562                 "LICENSE", "PDEPEND", "PROVIDE", "RDEPEND",
1563                 "repository", "PROPERTIES", "RESTRICT", "SLOT", "USE", "_mtime_"]
1564
1565         def __init__(self, **kwargs):
1566                 Task.__init__(self, **kwargs)
1567                 self.root = self.root_config.root
1568                 self.metadata = _PackageMetadataWrapper(self, self.metadata)
1569                 self.cp = portage.cpv_getkey(self.cpv)
1570                 self.slot_atom = portage.dep.Atom("%s:%s" % (self.cp, self.slot))
1571                 self.category, self.pf = portage.catsplit(self.cpv)
1572                 self.cpv_split = portage.catpkgsplit(self.cpv)
1573                 self.pv_split = self.cpv_split[1:]
1574
1575         class _use(object):
1576
1577                 __slots__ = ("__weakref__", "enabled")
1578
1579                 def __init__(self, use):
1580                         self.enabled = frozenset(use)
1581
1582         class _iuse(object):
1583
1584                 __slots__ = ("__weakref__", "all", "enabled", "disabled", "iuse_implicit", "regex", "tokens")
1585
1586                 def __init__(self, tokens, iuse_implicit):
1587                         self.tokens = tuple(tokens)
1588                         self.iuse_implicit = iuse_implicit
1589                         enabled = []
1590                         disabled = []
1591                         other = []
1592                         for x in tokens:
1593                                 prefix = x[:1]
1594                                 if prefix == "+":
1595                                         enabled.append(x[1:])
1596                                 elif prefix == "-":
1597                                         disabled.append(x[1:])
1598                                 else:
1599                                         other.append(x)
1600                         self.enabled = frozenset(enabled)
1601                         self.disabled = frozenset(disabled)
1602                         self.all = frozenset(chain(enabled, disabled, other))
1603
1604                 def __getattribute__(self, name):
1605                         if name == "regex":
1606                                 try:
1607                                         return object.__getattribute__(self, "regex")
1608                                 except AttributeError:
1609                                         all = object.__getattribute__(self, "all")
1610                                         iuse_implicit = object.__getattribute__(self, "iuse_implicit")
1611                                         # Escape anything except ".*" which is supposed
1612                                         # to pass through from _get_implicit_iuse()
1613                                         regex = (re.escape(x) for x in chain(all, iuse_implicit))
1614                                         regex = "^(%s)$" % "|".join(regex)
1615                                         regex = regex.replace("\\.\\*", ".*")
1616                                         self.regex = re.compile(regex)
1617                         return object.__getattribute__(self, name)
1618
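        # Illustrative note (not part of the original code): given hypothetical
        # tokens ["+berkdb", "-gdbm", "ipv6"] and an iuse_implicit entry such as
        # "kernel_.*", the lazily compiled regex is roughly
        #       ^(berkdb|gdbm|ipv6|kernel_.*)$
        # (modulo escaping and set ordering): every flag is escaped except the
        # ".*" wildcards that are passed through from _get_implicit_iuse().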
1619         def _get_hash_key(self):
1620                 hash_key = getattr(self, "_hash_key", None)
1621                 if hash_key is None:
1622                         if self.operation is None:
1623                                 self.operation = "merge"
1624                                 if self.onlydeps or self.installed:
1625                                         self.operation = "nomerge"
1626                         self._hash_key = \
1627                                 (self.type_name, self.root, self.cpv, self.operation)
1628                 return self._hash_key
1629
1630         def __lt__(self, other):
1631                 if other.cp != self.cp:
1632                         return False
1633                 if portage.pkgcmp(self.pv_split, other.pv_split) < 0:
1634                         return True
1635                 return False
1636
1637         def __le__(self, other):
1638                 if other.cp != self.cp:
1639                         return False
1640                 if portage.pkgcmp(self.pv_split, other.pv_split) <= 0:
1641                         return True
1642                 return False
1643
1644         def __gt__(self, other):
1645                 if other.cp != self.cp:
1646                         return False
1647                 if portage.pkgcmp(self.pv_split, other.pv_split) > 0:
1648                         return True
1649                 return False
1650
1651         def __ge__(self, other):
1652                 if other.cp != self.cp:
1653                         return False
1654                 if portage.pkgcmp(self.pv_split, other.pv_split) >= 0:
1655                         return True
1656                 return False
1657
1658 _all_metadata_keys = set(x for x in portage.auxdbkeys \
1659         if not x.startswith("UNUSED_"))
1660 _all_metadata_keys.discard("CDEPEND")
1661 _all_metadata_keys.update(Package.metadata_keys)
1662
1663 from portage.cache.mappings import slot_dict_class
1664 _PackageMetadataWrapperBase = slot_dict_class(_all_metadata_keys)
1665
1666 class _PackageMetadataWrapper(_PackageMetadataWrapperBase):
1667         """
1668         Detect metadata updates and synchronize Package attributes.
1669         """
1670
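        # For example (illustrative only): assigning to a wrapped key keeps the
        # owning Package in sync, e.g.
        #       pkg.metadata["SLOT"] = "2"
        # also updates pkg.slot via _set_slot(), and
        #       pkg.metadata["COUNTER"] = "42"
        # sets pkg.counter to the long value 42 via _set_counter().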
1671         __slots__ = ("_pkg",)
1672         _wrapped_keys = frozenset(
1673                 ["COUNTER", "INHERITED", "IUSE", "SLOT", "USE", "_mtime_"])
1674
1675         def __init__(self, pkg, metadata):
1676                 _PackageMetadataWrapperBase.__init__(self)
1677                 self._pkg = pkg
1678                 self.update(metadata)
1679
1680         def __setitem__(self, k, v):
1681                 _PackageMetadataWrapperBase.__setitem__(self, k, v)
1682                 if k in self._wrapped_keys:
1683                         getattr(self, "_set_" + k.lower())(k, v)
1684
1685         def _set_inherited(self, k, v):
1686                 if isinstance(v, basestring):
1687                         v = frozenset(v.split())
1688                 self._pkg.inherited = v
1689
1690         def _set_iuse(self, k, v):
1691                 self._pkg.iuse = self._pkg._iuse(
1692                         v.split(), self._pkg.root_config.iuse_implicit)
1693
1694         def _set_slot(self, k, v):
1695                 self._pkg.slot = v
1696
1697         def _set_use(self, k, v):
1698                 self._pkg.use = self._pkg._use(v.split())
1699
1700         def _set_counter(self, k, v):
1701                 if isinstance(v, basestring):
1702                         try:
1703                                 v = long(v.strip())
1704                         except ValueError:
1705                                 v = 0
1706                 self._pkg.counter = v
1707
1708         def _set__mtime_(self, k, v):
1709                 if isinstance(v, basestring):
1710                         try:
1711                                 v = long(v.strip())
1712                         except ValueError:
1713                                 v = 0
1714                 self._pkg.mtime = v
1715
1716 class EbuildFetchonly(SlotObject):
1717
1718         __slots__ = ("fetch_all", "pkg", "pretend", "settings")
1719
1720         def execute(self):
1721                 settings = self.settings
1722                 pkg = self.pkg
1723                 portdb = pkg.root_config.trees["porttree"].dbapi
1724                 ebuild_path = portdb.findname(pkg.cpv)
1725                 settings.setcpv(pkg)
1726                 debug = settings.get("PORTAGE_DEBUG") == "1"
1727                 use_cache = 1 # always true
1728                 portage.doebuild_environment(ebuild_path, "fetch",
1729                         settings["ROOT"], settings, debug, use_cache, portdb)
1730                 restrict_fetch = 'fetch' in settings['PORTAGE_RESTRICT'].split()
1731
1732                 if restrict_fetch:
1733                         rval = self._execute_with_builddir()
1734                 else:
1735                         rval = portage.doebuild(ebuild_path, "fetch",
1736                                 settings["ROOT"], settings, debug=debug,
1737                                 listonly=self.pretend, fetchonly=1, fetchall=self.fetch_all,
1738                                 mydbapi=portdb, tree="porttree")
1739
1740                         if rval != os.EX_OK:
1741                                 msg = "Fetch failed for '%s'" % (pkg.cpv,)
1742                                 eerror(msg, phase="unpack", key=pkg.cpv)
1743
1744                 return rval
1745
1746         def _execute_with_builddir(self):
1747                 # Spawning pkg_nofetch requires PORTAGE_BUILDDIR in order
1748                 # to ensure a sane $PWD (bug #239560) and to store elog
1749                 # messages. Use a private temp directory, in order
1750                 # to avoid locking the main one.
1751                 settings = self.settings
1752                 global_tmpdir = settings["PORTAGE_TMPDIR"]
1753                 from tempfile import mkdtemp
1754                 try:
1755                         private_tmpdir = mkdtemp("", "._portage_fetch_.", global_tmpdir)
1756                 except OSError, e:
1757                         if e.errno != portage.exception.PermissionDenied.errno:
1758                                 raise
1759                         raise portage.exception.PermissionDenied(global_tmpdir)
1760                 settings["PORTAGE_TMPDIR"] = private_tmpdir
1761                 settings.backup_changes("PORTAGE_TMPDIR")
1762                 try:
1763                         retval = self._execute()
1764                 finally:
1765                         settings["PORTAGE_TMPDIR"] = global_tmpdir
1766                         settings.backup_changes("PORTAGE_TMPDIR")
1767                         shutil.rmtree(private_tmpdir)
1768                 return retval
1769
1770         def _execute(self):
1771                 settings = self.settings
1772                 pkg = self.pkg
1773                 root_config = pkg.root_config
1774                 portdb = root_config.trees["porttree"].dbapi
1775                 ebuild_path = portdb.findname(pkg.cpv)
1776                 debug = settings.get("PORTAGE_DEBUG") == "1"
1777                 portage.prepare_build_dirs(self.pkg.root, self.settings, 0)
1778
1779                 retval = portage.doebuild(ebuild_path, "fetch",
1780                         self.settings["ROOT"], self.settings, debug=debug,
1781                         listonly=self.pretend, fetchonly=1, fetchall=self.fetch_all,
1782                         mydbapi=portdb, tree="porttree")
1783
1784                 if retval != os.EX_OK:
1785                         msg = "Fetch failed for '%s'" % (pkg.cpv,)
1786                         eerror(msg, phase="unpack", key=pkg.cpv)
1787
1788                 portage.elog.elog_process(self.pkg.cpv, self.settings)
1789                 return retval
1790
1791 class PollConstants(object):
1792
1793         """
1794         Provides POLL* constants that are equivalent to those from the
1795         select module, for use by PollSelectAdapter.
1796         """
1797
1798         names = ("POLLIN", "POLLPRI", "POLLOUT", "POLLERR", "POLLHUP", "POLLNVAL")
1799         v = 1
1800         for k in names:
1801                 locals()[k] = getattr(select, k, v)
1802                 v *= 2
1803         del k, v
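        # Illustrative usage (not part of the original code): event masks are
        # tested against these constants regardless of whether the select module
        # defines them on the current platform, e.g.
        #
        #       if event & PollConstants.POLLIN:
        #               ...  # data is available for reading
        #       if event & (PollConstants.POLLERR | PollConstants.POLLNVAL):
        #               ...  # exceptional condition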
1804
1805 class AsynchronousTask(SlotObject):
1806         """
1807         Subclasses override _wait() and _poll() so that calls
1808         to public methods can be wrapped for implementing
1809         hooks such as exit listener notification.
1810
1811         Subclasses should call self.wait() to notify exit listeners after
1812         the task is complete and self.returncode has been set.
1813         """
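        # Minimal subclass sketch (illustrative only, not from the original
        # source); a hypothetical task that completes immediately:
        #
        #       class NoopTask(AsynchronousTask):
        #               __slots__ = ()
        #               def _start(self):
        #                       self.returncode = os.EX_OK
        #                       self.wait()
        #
        # Callers use task.start(), task.poll() / task.isAlive() and task.wait(),
        # and may register callbacks via addExitListener() / addStartListener().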
1814
1815         __slots__ = ("background", "cancelled", "returncode") + \
1816                 ("_exit_listeners", "_exit_listener_stack", "_start_listeners")
1817
1818         def start(self):
1819                 """
1820                 Start an asynchronous task and then return as soon as possible.
1821                 """
1822                 self._start_hook()
1823                 self._start()
1824
1825         def _start(self):
1826                 raise NotImplementedError(self)
1827
1828         def isAlive(self):
1829                 return self.returncode is None
1830
1831         def poll(self):
1832                 self._wait_hook()
1833                 return self._poll()
1834
1835         def _poll(self):
1836                 return self.returncode
1837
1838         def wait(self):
1839                 if self.returncode is None:
1840                         self._wait()
1841                 self._wait_hook()
1842                 return self.returncode
1843
1844         def _wait(self):
1845                 return self.returncode
1846
1847         def cancel(self):
1848                 self.cancelled = True
1849                 self.wait()
1850
1851         def addStartListener(self, f):
1852                 """
1853                 The function will be called with one argument, a reference to self.
1854                 """
1855                 if self._start_listeners is None:
1856                         self._start_listeners = []
1857                 self._start_listeners.append(f)
1858
1859         def removeStartListener(self, f):
1860                 if self._start_listeners is None:
1861                         return
1862                 self._start_listeners.remove(f)
1863
1864         def _start_hook(self):
1865                 if self._start_listeners is not None:
1866                         start_listeners = self._start_listeners
1867                         self._start_listeners = None
1868
1869                         for f in start_listeners:
1870                                 f(self)
1871
1872         def addExitListener(self, f):
1873                 """
1874                 The function will be called with one argument, a reference to self.
1875                 """
1876                 if self._exit_listeners is None:
1877                         self._exit_listeners = []
1878                 self._exit_listeners.append(f)
1879
1880         def removeExitListener(self, f):
1881                 if self._exit_listeners is None:
1882                         if self._exit_listener_stack is not None:
1883                                 self._exit_listener_stack.remove(f)
1884                         return
1885                 self._exit_listeners.remove(f)
1886
1887         def _wait_hook(self):
1888                 """
1889                 Call this method after the task completes, just before returning
1890                 the returncode from wait() or poll(). This hook is
1891                 used to trigger exit listeners when the returncode first
1892                 becomes available.
1893                 """
1894                 if self.returncode is not None and \
1895                         self._exit_listeners is not None:
1896
1897                         # This prevents recursion, in case one of the
1898                         # exit handlers triggers this method again by
1899                         # calling wait(). Use a stack that gives
1900                         # removeExitListener() an opportunity to consume
1901                         # listeners from the stack, before they can get
1902                         # called below. This is necessary because a call
1903                         # to one exit listener may result in a call to
1904                         # removeExitListener() for another listener on
1905                         # the stack. That listener needs to be removed
1906                         # from the stack since it would be inconsistent
1907                         # to call it after it has been passed into
1908                         # removeExitListener().
1909                         self._exit_listener_stack = self._exit_listeners
1910                         self._exit_listeners = None
1911
1912                         self._exit_listener_stack.reverse()
1913                         while self._exit_listener_stack:
1914                                 self._exit_listener_stack.pop()(self)
1915
1916 class AbstractPollTask(AsynchronousTask):
1917
1918         __slots__ = ("scheduler",) + \
1919                 ("_registered",)
1920
1921         _bufsize = 4096
1922         _exceptional_events = PollConstants.POLLERR | PollConstants.POLLNVAL
1923         _registered_events = PollConstants.POLLIN | PollConstants.POLLHUP | \
1924                 _exceptional_events
1925
1926         def _unregister(self):
1927                 raise NotImplementedError(self)
1928
1929         def _unregister_if_appropriate(self, event):
1930                 if self._registered:
1931                         if event & self._exceptional_events:
1932                                 self._unregister()
1933                                 self.cancel()
1934                         elif event & PollConstants.POLLHUP:
1935                                 self._unregister()
1936                                 self.wait()
1937
1938 class PipeReader(AbstractPollTask):
1939
1940         """
1941         Reads output from one or more files and saves it in memory,
1942         for retrieval via the getvalue() method. This is driven by
1943         the scheduler's poll() loop, so it runs entirely within the
1944         current process.
1945         """
1946
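        # Illustrative sketch (not part of the original code): a typical caller
        # hands over the read end of a pipe plus the scheduler that drives the
        # poll() loop; "sched" below is a hypothetical scheduler instance.
        #
        #       master_fd, slave_fd = os.pipe()
        #       reader = PipeReader(scheduler=sched,
        #               input_files={"pipe_read": os.fdopen(master_fd, 'rb')})
        #       reader.start()
        #       # ... a writer fills slave_fd and then closes it ...
        #       reader.wait()
        #       output = reader.getvalue()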
1947         __slots__ = ("input_files",) + \
1948                 ("_read_data", "_reg_ids")
1949
1950         def _start(self):
1951                 self._reg_ids = set()
1952                 self._read_data = []
1953                 for k, f in self.input_files.iteritems():
1954                         fcntl.fcntl(f.fileno(), fcntl.F_SETFL,
1955                                 fcntl.fcntl(f.fileno(), fcntl.F_GETFL) | os.O_NONBLOCK)
1956                         self._reg_ids.add(self.scheduler.register(f.fileno(),
1957                                 self._registered_events, self._output_handler))
1958                 self._registered = True
1959
1960         def isAlive(self):
1961                 return self._registered
1962
1963         def cancel(self):
1964                 if self.returncode is None:
1965                         self.returncode = 1
1966                         self.cancelled = True
1967                 self.wait()
1968
1969         def _wait(self):
1970                 if self.returncode is not None:
1971                         return self.returncode
1972
1973                 if self._registered:
1974                         self.scheduler.schedule(self._reg_ids)
1975                         self._unregister()
1976
1977                 self.returncode = os.EX_OK
1978                 return self.returncode
1979
1980         def getvalue(self):
1981                 """Retrieve the entire contents"""
1982                 if sys.hexversion >= 0x3000000:
1983                         return bytes().join(self._read_data)
1984                 return "".join(self._read_data)
1985
1986         def close(self):
1987                 """Free the memory buffer."""
1988                 self._read_data = None
1989
1990         def _output_handler(self, fd, event):
1991
1992                 if event & PollConstants.POLLIN:
1993
1994                         for f in self.input_files.itervalues():
1995                                 if fd == f.fileno():
1996                                         break
1997
1998                         buf = array.array('B')
1999                         try:
2000                                 buf.fromfile(f, self._bufsize)
2001                         except EOFError:
2002                                 pass
2003
2004                         if buf:
2005                                 self._read_data.append(buf.tostring())
2006                         else:
2007                                 self._unregister()
2008                                 self.wait()
2009
2010                 self._unregister_if_appropriate(event)
2011                 return self._registered
2012
2013         def _unregister(self):
2014                 """
2015                 Unregister from the scheduler and close open files.
2016                 """
2017
2018                 self._registered = False
2019
2020                 if self._reg_ids is not None:
2021                         for reg_id in self._reg_ids:
2022                                 self.scheduler.unregister(reg_id)
2023                         self._reg_ids = None
2024
2025                 if self.input_files is not None:
2026                         for f in self.input_files.itervalues():
2027                                 f.close()
2028                         self.input_files = None
2029
2030 class CompositeTask(AsynchronousTask):
2031
2032         __slots__ = ("scheduler",) + ("_current_task",)
2033
2034         def isAlive(self):
2035                 return self._current_task is not None
2036
2037         def cancel(self):
2038                 self.cancelled = True
2039                 if self._current_task is not None:
2040                         self._current_task.cancel()
2041
2042         def _poll(self):
2043                 """
2044                 This does a loop calling self._current_task.poll()
2045                 repeatedly as long as the value of self._current_task
2046                 keeps changing. It calls poll() a maximum of one time
2047                 for a given self._current_task instance. This is useful
2048                 since calling poll() on a task can trigger an advance to
2049                 the next task, which could eventually lead to the returncode
2050                 being set in cases where polling only a single task would
2051                 not have the same effect.
2052                 """
2053
2054                 prev = None
2055                 while True:
2056                         task = self._current_task
2057                         if task is None or task is prev:
2058                                 # don't poll the same task more than once
2059                                 break
2060                         task.poll()
2061                         prev = task
2062
2063                 return self.returncode
2064
2065         def _wait(self):
2066
2067                 prev = None
2068                 while True:
2069                         task = self._current_task
2070                         if task is None:
2071                                 # don't wait for the same task more than once
2072                                 break
2073                         if task is prev:
2074                                 # Before the task.wait() method returned, an exit
2075                                 # listener should have set self._current_task to either
2076                                 # a different task or None. Something is wrong.
2077                                 raise AssertionError("self._current_task has not " + \
2078                                         "changed since calling wait", self, task)
2079                         task.wait()
2080                         prev = task
2081
2082                 return self.returncode
2083
2084         def _assert_current(self, task):
2085                 """
2086                 Raises an AssertionError if the given task is not the
2087                 same one as self._current_task. This can be useful
2088                 for detecting bugs.
2089                 """
2090                 if task is not self._current_task:
2091                         raise AssertionError("Unrecognized task: %s" % (task,))
2092
2093         def _default_exit(self, task):
2094                 """
2095                 Calls _assert_current() on the given task and then sets the
2096                 composite returncode attribute if task.returncode != os.EX_OK.
2097                 If the task failed then self._current_task will be set to None.
2098                 Subclasses can use this as a generic task exit callback.
2099
2100                 @rtype: int
2101                 @returns: The task.returncode attribute.
2102                 """
2103                 self._assert_current(task)
2104                 if task.returncode != os.EX_OK:
2105                         self.returncode = task.returncode
2106                         self._current_task = None
2107                 return task.returncode
2108
2109         def _final_exit(self, task):
2110                 """
2111                 Assumes that task is the final task of this composite task.
2112                 Calls _default_exit() and sets self.returncode to the task's
2113                 returncode and sets self._current_task to None.
2114                 """
2115                 self._default_exit(task)
2116                 self._current_task = None
2117                 self.returncode = task.returncode
2118                 return self.returncode
2119
2120         def _default_final_exit(self, task):
2121                 """
2122                 This calls _final_exit() and then wait().
2123
2124                 Subclasses can use this as a generic final task exit callback.
2125
2126                 """
2127                 self._final_exit(task)
2128                 return self.wait()
2129
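        # Illustrative sketch (not from the original source): a composite
        # subclass normally chains its tasks through _start_task() below,
        # using _default_exit() / _default_final_exit() as exit callbacks.
        # The class and attribute names here are hypothetical.
        #
        #       class TwoStepTask(CompositeTask):
        #               __slots__ = ("first", "second")
        #               def _start(self):
        #                       self._start_task(self.first, self._first_exit)
        #               def _first_exit(self, first):
        #                       if self._default_exit(first) != os.EX_OK:
        #                               self.wait()
        #                               return
        #                       self._start_task(self.second, self._default_final_exit)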
2130         def _start_task(self, task, exit_handler):
2131                 """
2132                 Register exit handler for the given task, set it
2133                 as self._current_task, and call task.start().
2134
2135                 Subclasses can use this as a generic way to start
2136                 a task.
2137
2138                 """
2139                 task.addExitListener(exit_handler)
2140                 self._current_task = task
2141                 task.start()
2142
2143 class TaskSequence(CompositeTask):
2144         """
2145         A collection of tasks that executes sequentially. Each task
2146         must have an addExitListener() method that can be used as
2147         a means to trigger movement from one task to the next.
2148         """
2149
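        # Illustrative usage (not part of the original code); "sched", "task_a"
        # and "task_b" are hypothetical objects:
        #
        #       seq = TaskSequence(scheduler=sched)
        #       seq.add(task_a)
        #       seq.add(task_b)
        #       seq.start()
        #       seq.wait()   # task_b runs only if task_a exited with os.EX_OK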
2150         __slots__ = ("_task_queue",)
2151
2152         def __init__(self, **kwargs):
2153                 AsynchronousTask.__init__(self, **kwargs)
2154                 self._task_queue = deque()
2155
2156         def add(self, task):
2157                 self._task_queue.append(task)
2158
2159         def _start(self):
2160                 self._start_next_task()
2161
2162         def cancel(self):
2163                 self._task_queue.clear()
2164                 CompositeTask.cancel(self)
2165
2166         def _start_next_task(self):
2167                 self._start_task(self._task_queue.popleft(),
2168                         self._task_exit_handler)
2169
2170         def _task_exit_handler(self, task):
2171                 if self._default_exit(task) != os.EX_OK:
2172                         self.wait()
2173                 elif self._task_queue:
2174                         self._start_next_task()
2175                 else:
2176                         self._final_exit(task)
2177                         self.wait()
2178
2179 class SubProcess(AbstractPollTask):
2180
2181         __slots__ = ("pid",) + \
2182                 ("_files", "_reg_id")
2183
2184         # A file descriptor is required for the scheduler to monitor changes from
2185         # inside a poll() loop. When logging is not enabled, create a pipe just to
2186         # serve this purpose alone.
2187         _dummy_pipe_fd = 9
2188
2189         def _poll(self):
2190                 if self.returncode is not None:
2191                         return self.returncode
2192                 if self.pid is None:
2193                         return self.returncode
2194                 if self._registered:
2195                         return self.returncode
2196
2197                 try:
2198                         retval = os.waitpid(self.pid, os.WNOHANG)
2199                 except OSError, e:
2200                         if e.errno != errno.ECHILD:
2201                                 raise
2202                         del e
2203                         retval = (self.pid, 1)
2204
2205                 if retval == (0, 0):
2206                         return None
2207                 self._set_returncode(retval)
2208                 return self.returncode
2209
2210         def cancel(self):
2211                 if self.isAlive():
2212                         try:
2213                                 os.kill(self.pid, signal.SIGTERM)
2214                         except OSError, e:
2215                                 if e.errno != errno.ESRCH:
2216                                         raise
2217                                 del e
2218
2219                 self.cancelled = True
2220                 if self.pid is not None:
2221                         self.wait()
2222                 return self.returncode
2223
2224         def isAlive(self):
2225                 return self.pid is not None and \
2226                         self.returncode is None
2227
2228         def _wait(self):
2229
2230                 if self.returncode is not None:
2231                         return self.returncode
2232
2233                 if self._registered:
2234                         self.scheduler.schedule(self._reg_id)
2235                         self._unregister()
2236                         if self.returncode is not None:
2237                                 return self.returncode
2238
2239                 try:
2240                         wait_retval = os.waitpid(self.pid, 0)
2241                 except OSError, e:
2242                         if e.errno != errno.ECHILD:
2243                                 raise
2244                         del e
2245                         self._set_returncode((self.pid, 1))
2246                 else:
2247                         self._set_returncode(wait_retval)
2248
2249                 return self.returncode
2250
2251         def _unregister(self):
2252                 """
2253                 Unregister from the scheduler and close open files.
2254                 """
2255
2256                 self._registered = False
2257
2258                 if self._reg_id is not None:
2259                         self.scheduler.unregister(self._reg_id)
2260                         self._reg_id = None
2261
2262                 if self._files is not None:
2263                         for f in self._files.itervalues():
2264                                 f.close()
2265                         self._files = None
2266
2267         def _set_returncode(self, wait_retval):
2268
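                # Note (added for clarity, not in the original source):
                # wait_retval is the (pid, status) tuple from os.waitpid().
                # In that encoding the high byte of status holds the exit
                # status and the low byte holds the termination signal (plus
                # the core-dump bit), so the code below maps a normal exit of
                # N to N, and death by a signal to the low byte shifted up by
                # eight bits, which cannot collide with a normal exit code.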
2269                 retval = wait_retval[1]
2270
2271                 if retval != os.EX_OK:
2272                         if retval & 0xff:
2273                                 retval = (retval & 0xff) << 8
2274                         else:
2275                                 retval = retval >> 8
2276
2277                 self.returncode = retval
2278
2279 class SpawnProcess(SubProcess):
2280
2281         """
2282         Constructor keyword args are passed into portage.process.spawn().
2283         The required "args" keyword argument will be passed as the first
2284         spawn() argument.
2285         """
2286
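        # Illustrative usage (not from the original source); "sched" is a
        # hypothetical scheduler and the argv/logfile values are examples only:
        #
        #       proc = SpawnProcess(args=["/bin/sh", "-c", "echo hello"],
        #               env=os.environ.copy(), scheduler=sched,
        #               logfile="/var/log/hello.log")
        #       proc.start()
        #       if proc.wait() != os.EX_OK:
        #               ...  # spawn failure or nonzero command exit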
2287         _spawn_kwarg_names = ("env", "opt_name", "fd_pipes",
2288                 "uid", "gid", "groups", "umask", "logfile",
2289                 "path_lookup", "pre_exec")
2290
2291         __slots__ = ("args",) + \
2292                 _spawn_kwarg_names
2293
2294         _file_names = ("log", "process", "stdout")
2295         _files_dict = slot_dict_class(_file_names, prefix="")
2296
2297         def _start(self):
2298
2299                 if self.cancelled:
2300                         return
2301
2302                 if self.fd_pipes is None:
2303                         self.fd_pipes = {}
2304                 fd_pipes = self.fd_pipes
2305                 fd_pipes.setdefault(0, sys.stdin.fileno())
2306                 fd_pipes.setdefault(1, sys.stdout.fileno())
2307                 fd_pipes.setdefault(2, sys.stderr.fileno())
2308
2309                 # flush any pending output
2310                 for fd in fd_pipes.itervalues():
2311                         if fd == sys.stdout.fileno():
2312                                 sys.stdout.flush()
2313                         if fd == sys.stderr.fileno():
2314                                 sys.stderr.flush()
2315
2316                 logfile = self.logfile
2317                 self._files = self._files_dict()
2318                 files = self._files
2319
2320                 master_fd, slave_fd = self._pipe(fd_pipes)
2321                 fcntl.fcntl(master_fd, fcntl.F_SETFL,
2322                         fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
2323
2324                 null_input = None
2325                 fd_pipes_orig = fd_pipes.copy()
2326                 if self.background:
2327                         # TODO: Use job control functions like tcsetpgrp() to control
2328                         # access to stdin. Until then, use /dev/null so that any
2329                         # attempts to read from stdin will immediately return EOF
2330                         # instead of blocking indefinitely.
2331                         null_input = open('/dev/null', 'rb')
2332                         fd_pipes[0] = null_input.fileno()
2333                 else:
2334                         fd_pipes[0] = fd_pipes_orig[0]
2335
2336                 files.process = os.fdopen(master_fd, 'rb')
2337                 if logfile is not None:
2338
2339                         fd_pipes[1] = slave_fd
2340                         fd_pipes[2] = slave_fd
2341
2342                         files.log = open(logfile, mode='ab')
2343                         portage.util.apply_secpass_permissions(logfile,
2344                                 uid=portage.portage_uid, gid=portage.portage_gid,
2345                                 mode=0660)
2346
2347                         if not self.background:
2348                                 files.stdout = os.fdopen(os.dup(fd_pipes_orig[1]), 'wb')
2349
2350                         output_handler = self._output_handler
2351
2352                 else:
2353
2354                         # Create a dummy pipe so the scheduler can monitor
2355                         # the process from inside a poll() loop.
2356                         fd_pipes[self._dummy_pipe_fd] = slave_fd
2357                         if self.background:
2358                                 fd_pipes[1] = slave_fd
2359                                 fd_pipes[2] = slave_fd
2360                         output_handler = self._dummy_handler
2361
2362                 kwargs = {}
2363                 for k in self._spawn_kwarg_names:
2364                         v = getattr(self, k)
2365                         if v is not None:
2366                                 kwargs[k] = v
2367
2368                 kwargs["fd_pipes"] = fd_pipes
2369                 kwargs["returnpid"] = True
2370                 kwargs.pop("logfile", None)
2371
2372                 self._reg_id = self.scheduler.register(files.process.fileno(),
2373                         self._registered_events, output_handler)
2374                 self._registered = True
2375
2376                 retval = self._spawn(self.args, **kwargs)
2377
2378                 os.close(slave_fd)
2379                 if null_input is not None:
2380                         null_input.close()
2381
2382                 if isinstance(retval, int):
2383                         # spawn failed
2384                         self._unregister()
2385                         self.returncode = retval
2386                         self.wait()
2387                         return
2388
2389                 self.pid = retval[0]
2390                 portage.process.spawned_pids.remove(self.pid)
2391
2392         def _pipe(self, fd_pipes):
2393                 """
2394                 @type fd_pipes: dict
2395                 @param fd_pipes: pipes from which to copy terminal size if desired.
2396                 """
2397                 return os.pipe()
2398
2399         def _spawn(self, args, **kwargs):
2400                 return portage.process.spawn(args, **kwargs)
2401
2402         def _output_handler(self, fd, event):
2403
2404                 if event & PollConstants.POLLIN:
2405
2406                         files = self._files
2407                         buf = array.array('B')
2408                         try:
2409                                 buf.fromfile(files.process, self._bufsize)
2410                         except EOFError:
2411                                 pass
2412
2413                         if buf:
2414                                 if not self.background:
2415                                         buf.tofile(files.stdout)
2416                                         files.stdout.flush()
2417                                 buf.tofile(files.log)
2418                                 files.log.flush()
2419                         else:
2420                                 self._unregister()
2421                                 self.wait()
2422
2423                 self._unregister_if_appropriate(event)
2424                 return self._registered
2425
2426         def _dummy_handler(self, fd, event):
2427                 """
2428                 This method is mainly interested in detecting EOF, since
2429                 the only purpose of the pipe is to allow the scheduler to
2430                 monitor the process from inside a poll() loop.
2431                 """
2432
2433                 if event & PollConstants.POLLIN:
2434
2435                         buf = array.array('B')
2436                         try:
2437                                 buf.fromfile(self._files.process, self._bufsize)
2438                         except EOFError:
2439                                 pass
2440
2441                         if buf:
2442                                 pass
2443                         else:
2444                                 self._unregister()
2445                                 self.wait()
2446
2447                 self._unregister_if_appropriate(event)
2448                 return self._registered
2449
2450 class MiscFunctionsProcess(SpawnProcess):
2451         """
2452         Spawns misc-functions.sh with an existing ebuild environment.
2453         """
2454
2455         __slots__ = ("commands", "phase", "pkg", "settings")
2456
2457         def _start(self):
2458                 settings = self.settings
2459                 settings.pop("EBUILD_PHASE", None)
2460                 portage_bin_path = settings["PORTAGE_BIN_PATH"]
2461                 misc_sh_binary = os.path.join(portage_bin_path,
2462                         os.path.basename(portage.const.MISC_SH_BINARY))
2463
2464                 self.args = [portage._shell_quote(misc_sh_binary)] + self.commands
2465                 self.logfile = settings.get("PORTAGE_LOG_FILE")
2466
2467                 portage._doebuild_exit_status_unlink(
2468                         settings.get("EBUILD_EXIT_STATUS_FILE"))
2469
2470                 SpawnProcess._start(self)
2471
2472         def _spawn(self, args, **kwargs):
2473                 settings = self.settings
2474                 debug = settings.get("PORTAGE_DEBUG") == "1"
2475                 return portage.spawn(" ".join(args), settings,
2476                         debug=debug, **kwargs)
2477
2478         def _set_returncode(self, wait_retval):
2479                 SpawnProcess._set_returncode(self, wait_retval)
2480                 self.returncode = portage._doebuild_exit_status_check_and_log(
2481                         self.settings, self.phase, self.returncode)
2482
2483 class EbuildFetcher(SpawnProcess):
2484
2485         __slots__ = ("config_pool", "fetchonly", "fetchall", "pkg", "prefetch") + \
2486                 ("_build_dir",)
2487
2488         def _start(self):
2489
2490                 root_config = self.pkg.root_config
2491                 portdb = root_config.trees["porttree"].dbapi
2492                 ebuild_path = portdb.findname(self.pkg.cpv)
2493                 settings = self.config_pool.allocate()
2494                 settings.setcpv(self.pkg)
2495
2496                 # In prefetch mode, logging goes to emerge-fetch.log and the builddir
2497                 # should not be touched since otherwise it could interfere with
2498                 # another instance of the same cpv concurrently being built for a
2499                 # different $ROOT (currently, builds only cooperate with prefetchers
2500                 # that are spawned for the same $ROOT).
2501                 if not self.prefetch:
2502                         self._build_dir = EbuildBuildDir(pkg=self.pkg, settings=settings)
2503                         self._build_dir.lock()
2504                         self._build_dir.clean()
2505                         portage.prepare_build_dirs(self.pkg.root, self._build_dir.settings, 0)
2506                         if self.logfile is None:
2507                                 self.logfile = settings.get("PORTAGE_LOG_FILE")
2508
2509                 phase = "fetch"
2510                 if self.fetchall:
2511                         phase = "fetchall"
2512
2513                 # If any incremental variables have been overridden
2514                 # via the environment, those values need to be passed
2515                 # along here so that they are correctly considered by
2516                 # the config instance in the subprocess.
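                # (Here "incremental variables" means variables like USE or
                # FEATURES, which portage merges incrementally across config
                # sources.)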
2517                 fetch_env = os.environ.copy()
2518
2519                 nocolor = settings.get("NOCOLOR")
2520                 if nocolor is not None:
2521                         fetch_env["NOCOLOR"] = nocolor
2522
2523                 fetch_env["PORTAGE_NICENESS"] = "0"
2524                 if self.prefetch:
2525                         fetch_env["PORTAGE_PARALLEL_FETCHONLY"] = "1"
2526
2527                 ebuild_binary = os.path.join(
2528                         settings["PORTAGE_BIN_PATH"], "ebuild")
2529
2530                 fetch_args = [ebuild_binary, ebuild_path, phase]
2531                 debug = settings.get("PORTAGE_DEBUG") == "1"
2532                 if debug:
2533                         fetch_args.append("--debug")
2534
2535                 self.args = fetch_args
2536                 self.env = fetch_env
2537                 SpawnProcess._start(self)
2538
2539         def _pipe(self, fd_pipes):
2540                 """When appropriate, use a pty so that fetcher progress bars,
2541                 like wget's, will work properly."""
2542                 if self.background or not sys.stdout.isatty():
2543                         # When the output only goes to a log file,
2544                         # there's no point in creating a pty.
2545                         return os.pipe()
2546                 stdout_pipe = fd_pipes.get(1)
2547                 got_pty, master_fd, slave_fd = \
2548                         portage._create_pty_or_pipe(copy_term_size=stdout_pipe)
2549                 return (master_fd, slave_fd)
2550
2551         def _set_returncode(self, wait_retval):
2552                 SpawnProcess._set_returncode(self, wait_retval)
2553                 # Collect elog messages that might have been
2554                 # created by the pkg_nofetch phase.
2555                 if self._build_dir is not None:
2556                         # Skip elog messages for prefetch, in order to avoid duplicates.
2557                         if not self.prefetch and self.returncode != os.EX_OK:
2558                                 elog_out = None
2559                                 if self.logfile is not None:
2560                                         if self.background:
2561                                                 elog_out = open(self.logfile, 'a')
2562                                 msg = "Fetch failed for '%s'" % (self.pkg.cpv,)
2563                                 if self.logfile is not None:
2564                                         msg += ", Log file:"
2565                                 eerror(msg, phase="unpack", key=self.pkg.cpv, out=elog_out)
2566                                 if self.logfile is not None:
2567                                         eerror(" '%s'" % (self.logfile,),
2568                                                 phase="unpack", key=self.pkg.cpv, out=elog_out)
2569                                 if elog_out is not None:
2570                                         elog_out.close()
2571                         if not self.prefetch:
2572                                 portage.elog.elog_process(self.pkg.cpv, self._build_dir.settings)
2573                         features = self._build_dir.settings.features
2574                         if self.returncode == os.EX_OK:
2575                                 self._build_dir.clean()
2576                         self._build_dir.unlock()
2577                         self.config_pool.deallocate(self._build_dir.settings)
2578                         self._build_dir = None
2579
2580 class EbuildBuildDir(SlotObject):
2581
2582         __slots__ = ("dir_path", "pkg", "settings",
2583                 "locked", "_catdir", "_lock_obj")
2584
2585         def __init__(self, **kwargs):
2586                 SlotObject.__init__(self, **kwargs)
2587                 self.locked = False
2588
2589         def lock(self):
2590                 """
2591                 This raises an AlreadyLocked exception if lock() is called
2592                 while a lock is already held. In order to avoid this, call
2593                 unlock() or check whether the "locked" attribute is True
2594                 or False before calling lock().
2595                 """
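                # Illustrative usage (a sketch of the intended calling pattern):
                #
                #     build_dir = EbuildBuildDir(pkg=pkg, settings=settings)
                #     if not build_dir.locked:
                #             build_dir.lock()
                #     try:
                #             ...  # work inside PORTAGE_BUILDDIR
                #     finally:
                #             build_dir.unlock()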
2596                 if self._lock_obj is not None:
2597                         raise self.AlreadyLocked((self._lock_obj,))
2598
2599                 dir_path = self.dir_path
2600                 if dir_path is None:
2601                         root_config = self.pkg.root_config
2602                         portdb = root_config.trees["porttree"].dbapi
2603                         ebuild_path = portdb.findname(self.pkg.cpv)
2604                         settings = self.settings
2605                         settings.setcpv(self.pkg)
2606                         debug = settings.get("PORTAGE_DEBUG") == "1"
2607                         use_cache = 1 # always true
2608                         portage.doebuild_environment(ebuild_path, "setup", root_config.root,
2609                                 self.settings, debug, use_cache, portdb)
2610                         dir_path = self.settings["PORTAGE_BUILDDIR"]
2611
2612                 catdir = os.path.dirname(dir_path)
2613                 self._catdir = catdir
2614
2615                 portage.util.ensure_dirs(os.path.dirname(catdir),
2616                         gid=portage.portage_gid,
2617                         mode=070, mask=0)
2618                 catdir_lock = None
2619                 try:
2620                         catdir_lock = portage.locks.lockdir(catdir)
2621                         portage.util.ensure_dirs(catdir,
2622                                 gid=portage.portage_gid,
2623                                 mode=070, mask=0)
2624                         self._lock_obj = portage.locks.lockdir(dir_path)
2625                 finally:
2626                         self.locked = self._lock_obj is not None
2627                         if catdir_lock is not None:
2628                                 portage.locks.unlockdir(catdir_lock)
2629
2630         def clean(self):
2631                 """Uses shutil.rmtree() rather than spawning a 'clean' phase. Disabled
2632                 by keepwork or keeptemp in FEATURES."""
2633                 settings = self.settings
2634                 features = settings.features
2635                 if not ("keepwork" in features or "keeptemp" in features):
2636                         try:
2637                                 shutil.rmtree(settings["PORTAGE_BUILDDIR"])
2638                         except EnvironmentError, e:
2639                                 if e.errno != errno.ENOENT:
2640                                         raise
2641                                 del e
2642
2643         def unlock(self):
2644                 if self._lock_obj is None:
2645                         return
2646
2647                 portage.locks.unlockdir(self._lock_obj)
2648                 self._lock_obj = None
2649                 self.locked = False
2650
2651                 catdir = self._catdir
2652                 catdir_lock = None
2653                 try:
2654                         catdir_lock = portage.locks.lockdir(catdir)
2655                 finally:
2656                         if catdir_lock:
2657                                 try:
2658                                         os.rmdir(catdir)
2659                                 except OSError, e:
2660                                         if e.errno not in (errno.ENOENT,
2661                                                 errno.ENOTEMPTY, errno.EEXIST):
2662                                                 raise
2663                                         del e
2664                                 portage.locks.unlockdir(catdir_lock)
2665
2666         class AlreadyLocked(portage.exception.PortageException):
2667                 pass
2668
2669 class EbuildBuild(CompositeTask):
2670
2671         __slots__ = ("args_set", "config_pool", "find_blockers",
2672                 "ldpath_mtimes", "logger", "opts", "pkg", "pkg_count",
2673                 "prefetcher", "settings", "world_atom") + \
2674                 ("_build_dir", "_buildpkg", "_ebuild_path", "_issyspkg", "_tree")
2675
2676         def _start(self):
2677
2678                 logger = self.logger
2679                 opts = self.opts
2680                 pkg = self.pkg
2681                 settings = self.settings
2682                 world_atom = self.world_atom
2683                 root_config = pkg.root_config
2684                 tree = "porttree"
2685                 self._tree = tree
2686                 portdb = root_config.trees[tree].dbapi
2687                 settings.setcpv(pkg)
2688                 settings.configdict["pkg"]["EMERGE_FROM"] = pkg.type_name
2689                 ebuild_path = portdb.findname(self.pkg.cpv)
2690                 self._ebuild_path = ebuild_path
2691
2692                 prefetcher = self.prefetcher
2693                 if prefetcher is None:
2694                         pass
2695                 elif not prefetcher.isAlive():
2696                         prefetcher.cancel()
2697                 elif prefetcher.poll() is None:
2698
2699                         waiting_msg = "Fetching files " + \
2700                                 "in the background. " + \
2701                                 "To view fetch progress, run `tail -f " + \
2702                                 "/var/log/emerge-fetch.log` in another " + \
2703                                 "terminal."
2704                         msg_prefix = colorize("GOOD", " * ")
2705                         from textwrap import wrap
2706                         waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
2707                                 for line in wrap(waiting_msg, 65))
2708                         if not self.background:
2709                                 writemsg(waiting_msg, noiselevel=-1)
2710
2711                         self._current_task = prefetcher
2712                         prefetcher.addExitListener(self._prefetch_exit)
2713                         return
2714
2715                 self._prefetch_exit(prefetcher)
2716
2717         def _prefetch_exit(self, prefetcher):
2718
2719                 opts = self.opts
2720                 pkg = self.pkg
2721                 settings = self.settings
2722
2723                 if opts.fetchonly:
2724                         fetcher = EbuildFetchonly(
2725                                 fetch_all=opts.fetch_all_uri,
2726                                 pkg=pkg, pretend=opts.pretend,
2727                                 settings=settings)
2728                         retval = fetcher.execute()
2729                         self.returncode = retval
2730                         self.wait()
2731                         return
2732
2733                 fetcher = EbuildFetcher(config_pool=self.config_pool,
2734                         fetchall=opts.fetch_all_uri,
2735                         fetchonly=opts.fetchonly,
2736                         background=self.background,
2737                         pkg=pkg, scheduler=self.scheduler)
2738
2739                 self._start_task(fetcher, self._fetch_exit)
2740
2741         def _fetch_exit(self, fetcher):
2742                 opts = self.opts
2743                 pkg = self.pkg
2744
2745                 fetch_failed = False
2746                 if opts.fetchonly:
2747                         fetch_failed = self._final_exit(fetcher) != os.EX_OK
2748                 else:
2749                         fetch_failed = self._default_exit(fetcher) != os.EX_OK
2750
2751                 if fetch_failed and fetcher.logfile is not None and \
2752                         os.path.exists(fetcher.logfile):
2753                         self.settings["PORTAGE_LOG_FILE"] = fetcher.logfile
2754
2755                 if not fetch_failed and fetcher.logfile is not None:
2756                         # Fetch was successful, so remove the fetch log.
2757                         try:
2758                                 os.unlink(fetcher.logfile)
2759                         except OSError:
2760                                 pass
2761
2762                 if fetch_failed or opts.fetchonly:
2763                         self.wait()
2764                         return
2765
2766                 logger = self.logger
2767                 opts = self.opts
2768                 pkg_count = self.pkg_count
2769                 scheduler = self.scheduler
2770                 settings = self.settings
2771                 features = settings.features
2772                 ebuild_path = self._ebuild_path
2773                 system_set = pkg.root_config.sets["system"]
2774
2775                 self._build_dir = EbuildBuildDir(pkg=pkg, settings=settings)
2776                 self._build_dir.lock()
2777
2778                 # Cleaning is triggered before the setup
2779                 # phase, in portage.doebuild().
2780                 msg = " === (%s of %s) Cleaning (%s::%s)" % \
2781                         (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2782                 short_msg = "emerge: (%s of %s) %s Clean" % \
2783                         (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2784                 logger.log(msg, short_msg=short_msg)
2785
2786                 #buildsyspkg: Check if we need to _force_ binary package creation
2787                 self._issyspkg = "buildsyspkg" in features and \
2788                                 system_set.findAtomForPackage(pkg) and \
2789                                 not opts.buildpkg
2790
2791                 if opts.buildpkg or self._issyspkg:
2792
2793                         self._buildpkg = True
2794
2795                         msg = " === (%s of %s) Compiling/Packaging (%s::%s)" % \
2796                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2797                         short_msg = "emerge: (%s of %s) %s Compile" % \
2798                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2799                         logger.log(msg, short_msg=short_msg)
2800
2801                 else:
2802                         msg = " === (%s of %s) Compiling/Merging (%s::%s)" % \
2803                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2804                         short_msg = "emerge: (%s of %s) %s Compile" % \
2805                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2806                         logger.log(msg, short_msg=short_msg)
2807
2808                 build = EbuildExecuter(background=self.background, pkg=pkg,
2809                         scheduler=scheduler, settings=settings)
2810                 self._start_task(build, self._build_exit)
2811
2812         def _unlock_builddir(self):
2813                 portage.elog.elog_process(self.pkg.cpv, self.settings)
2814                 self._build_dir.unlock()
2815
2816         def _build_exit(self, build):
2817                 if self._default_exit(build) != os.EX_OK:
2818                         self._unlock_builddir()
2819                         self.wait()
2820                         return
2821
2822                 opts = self.opts
2823                 buildpkg = self._buildpkg
2824
2825                 if not buildpkg:
2826                         self._final_exit(build)
2827                         self.wait()
2828                         return
2829
2830                 if self._issyspkg:
2831                         msg = ">>> This is a system package, " + \
2832                                 "let's pack a rescue tarball.\n"
2833
2834                         log_path = self.settings.get("PORTAGE_LOG_FILE")
2835                         if log_path is not None:
2836                                 log_file = open(log_path, 'a')
2837                                 try:
2838                                         log_file.write(msg)
2839                                 finally:
2840                                         log_file.close()
2841
2842                         if not self.background:
2843                                 portage.writemsg_stdout(msg, noiselevel=-1)
2844
2845                 packager = EbuildBinpkg(background=self.background, pkg=self.pkg,
2846                         scheduler=self.scheduler, settings=self.settings)
2847
2848                 self._start_task(packager, self._buildpkg_exit)
2849
2850         def _buildpkg_exit(self, packager):
2851                 """
2852                 Release the build dir lock when there is a failure or
2853                 when in buildpkgonly mode. Otherwise, the lock will
2854                 be released when merge() is called.
2855                 """
2856
2857                 if self._default_exit(packager) != os.EX_OK:
2858                         self._unlock_builddir()
2859                         self.wait()
2860                         return
2861
2862                 if self.opts.buildpkgonly:
2863                         # Need to call "clean" phase for buildpkgonly mode
2864                         portage.elog.elog_process(self.pkg.cpv, self.settings)
2865                         phase = "clean"
2866                         clean_phase = EbuildPhase(background=self.background,
2867                                 pkg=self.pkg, phase=phase,
2868                                 scheduler=self.scheduler, settings=self.settings,
2869                                 tree=self._tree)
2870                         self._start_task(clean_phase, self._clean_exit)
2871                         return
2872
2873                 # Continue holding the builddir lock until
2874                 # after the package has been installed.
2875                 self._current_task = None
2876                 self.returncode = packager.returncode
2877                 self.wait()
2878
2879         def _clean_exit(self, clean_phase):
2880                 if self._final_exit(clean_phase) != os.EX_OK or \
2881                         self.opts.buildpkgonly:
2882                         self._unlock_builddir()
2883                 self.wait()
2884
2885         def install(self):
2886                 """
2887                 Install the package and then clean up and release locks.
2888                 Only call this after the build has completed successfully
2889                 and neither fetchonly nor buildpkgonly mode is enabled.
2890                 """
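                # Sketch of the contract described above, from the caller's
                # point of view:
                #
                #     build = EbuildBuild(...)
                #     # ... run the build task to completion via the scheduler ...
                #     if build.returncode == os.EX_OK:
                #             retval = build.install()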
2891
2892                 find_blockers = self.find_blockers
2893                 ldpath_mtimes = self.ldpath_mtimes
2894                 logger = self.logger
2895                 pkg = self.pkg
2896                 pkg_count = self.pkg_count
2897                 settings = self.settings
2898                 world_atom = self.world_atom
2899                 ebuild_path = self._ebuild_path
2900                 tree = self._tree
2901
2902                 merge = EbuildMerge(find_blockers=self.find_blockers,
2903                         ldpath_mtimes=ldpath_mtimes, logger=logger, pkg=pkg,
2904                         pkg_count=pkg_count, pkg_path=ebuild_path,
2905                         scheduler=self.scheduler,
2906                         settings=settings, tree=tree, world_atom=world_atom)
2907
2908                 msg = " === (%s of %s) Merging (%s::%s)" % \
2909                         (pkg_count.curval, pkg_count.maxval,
2910                         pkg.cpv, ebuild_path)
2911                 short_msg = "emerge: (%s of %s) %s Merge" % \
2912                         (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2913                 logger.log(msg, short_msg=short_msg)
2914
2915                 try:
2916                         rval = merge.execute()
2917                 finally:
2918                         self._unlock_builddir()
2919
2920                 return rval
2921
2922 class EbuildExecuter(CompositeTask):
2923
2924         __slots__ = ("pkg", "scheduler", "settings") + ("_tree",)
2925
2926         _phases = ("prepare", "configure", "compile", "test", "install")
2927
2928         _live_eclasses = frozenset([
2929                 "bzr",
2930                 "cvs",
2931                 "darcs",
2932                 "git",
2933                 "mercurial",
2934                 "subversion"
2935         ])
2936
2937         def _start(self):
2938                 self._tree = "porttree"
2939                 pkg = self.pkg
2940                 phase = "clean"
2941                 clean_phase = EbuildPhase(background=self.background, pkg=pkg, phase=phase,
2942                         scheduler=self.scheduler, settings=self.settings, tree=self._tree)
2943                 self._start_task(clean_phase, self._clean_phase_exit)
2944
2945         def _clean_phase_exit(self, clean_phase):
2946
2947                 if self._default_exit(clean_phase) != os.EX_OK:
2948                         self.wait()
2949                         return
2950
2951                 pkg = self.pkg
2952                 scheduler = self.scheduler
2953                 settings = self.settings
2954                 cleanup = 1
2955
2956                 # This initializes PORTAGE_LOG_FILE.
2957                 portage.prepare_build_dirs(pkg.root, settings, cleanup)
2958
2959                 setup_phase = EbuildPhase(background=self.background,
2960                         pkg=pkg, phase="setup", scheduler=scheduler,
2961                         settings=settings, tree=self._tree)
2962
2963                 setup_phase.addExitListener(self._setup_exit)
2964                 self._current_task = setup_phase
2965                 self.scheduler.scheduleSetup(setup_phase)
2966
2967         def _setup_exit(self, setup_phase):
2968
2969                 if self._default_exit(setup_phase) != os.EX_OK:
2970                         self.wait()
2971                         return
2972
2973                 unpack_phase = EbuildPhase(background=self.background,
2974                         pkg=self.pkg, phase="unpack", scheduler=self.scheduler,
2975                         settings=self.settings, tree=self._tree)
2976
2977                 if self._live_eclasses.intersection(self.pkg.inherited):
2978                         # Serialize $DISTDIR access for live ebuilds since
2979                         # otherwise they can interfere with each other.
2980
2981                         unpack_phase.addExitListener(self._unpack_exit)
2982                         self._current_task = unpack_phase
2983                         self.scheduler.scheduleUnpack(unpack_phase)
2984
2985                 else:
2986                         self._start_task(unpack_phase, self._unpack_exit)
2987
2988         def _unpack_exit(self, unpack_phase):
2989
2990                 if self._default_exit(unpack_phase) != os.EX_OK:
2991                         self.wait()
2992                         return
2993
2994                 ebuild_phases = TaskSequence(scheduler=self.scheduler)
2995
2996                 pkg = self.pkg
2997                 phases = self._phases
2998                 eapi = pkg.metadata["EAPI"]
2999                 if eapi in ("0", "1"):
3000                         # skip src_prepare and src_configure
3001                         phases = phases[2:]
3002
3003                 for phase in phases:
3004                         ebuild_phases.add(EbuildPhase(background=self.background,
3005                                 pkg=self.pkg, phase=phase, scheduler=self.scheduler,
3006                                 settings=self.settings, tree=self._tree))
3007
3008                 self._start_task(ebuild_phases, self._default_final_exit)
3009
3010 class EbuildMetadataPhase(SubProcess):
3011
3012         """
3013         Asynchronous interface for the ebuild "depend" phase which is
3014         used to extract metadata from the ebuild.
3015         """
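        # The "depend" phase writes one line per key in portage.auxdbkeys to
        # file descriptor 9 (_metadata_fd below); _output_handler() collects
        # that output and _set_returncode() zips it back together with
        # auxdbkeys before invoking metadata_callback.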
3016
3017         __slots__ = ("cpv", "ebuild_path", "fd_pipes", "metadata_callback",
3018                 "ebuild_mtime", "portdb", "repo_path", "settings") + \
3019                 ("_raw_metadata",)
3020
3021         _file_names = ("ebuild",)
3022         _files_dict = slot_dict_class(_file_names, prefix="")
3023         _metadata_fd = 9
3024
3025         def _start(self):
3026                 settings = self.settings
3027                 settings.reset()
3028                 ebuild_path = self.ebuild_path
3029                 debug = settings.get("PORTAGE_DEBUG") == "1"
3030                 master_fd = None
3031                 slave_fd = None
3032                 fd_pipes = None
3033                 if self.fd_pipes is not None:
3034                         fd_pipes = self.fd_pipes.copy()
3035                 else:
3036                         fd_pipes = {}
3037
3038                 fd_pipes.setdefault(0, sys.stdin.fileno())
3039                 fd_pipes.setdefault(1, sys.stdout.fileno())
3040                 fd_pipes.setdefault(2, sys.stderr.fileno())
3041
3042                 # flush any pending output
3043                 for fd in fd_pipes.itervalues():
3044                         if fd == sys.stdout.fileno():
3045                                 sys.stdout.flush()
3046                         if fd == sys.stderr.fileno():
3047                                 sys.stderr.flush()
3048
3049                 fd_pipes_orig = fd_pipes.copy()
3050                 self._files = self._files_dict()
3051                 files = self._files
3052
3053                 master_fd, slave_fd = os.pipe()
3054                 fcntl.fcntl(master_fd, fcntl.F_SETFL,
3055                         fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
3056
3057                 fd_pipes[self._metadata_fd] = slave_fd
3058
3059                 self._raw_metadata = []
3060                 files.ebuild = os.fdopen(master_fd, 'r')
3061                 self._reg_id = self.scheduler.register(files.ebuild.fileno(),
3062                         self._registered_events, self._output_handler)
3063                 self._registered = True
3064
3065                 retval = portage.doebuild(ebuild_path, "depend",
3066                         settings["ROOT"], settings, debug,
3067                         mydbapi=self.portdb, tree="porttree",
3068                         fd_pipes=fd_pipes, returnpid=True)
3069
3070                 os.close(slave_fd)
3071
3072                 if isinstance(retval, int):
3073                         # doebuild failed before spawning
3074                         self._unregister()
3075                         self.returncode = retval
3076                         self.wait()
3077                         return
3078
3079                 self.pid = retval[0]
3080                 portage.process.spawned_pids.remove(self.pid)
3081
3082         def _output_handler(self, fd, event):
3083
3084                 if event & PollConstants.POLLIN:
3085                         self._raw_metadata.append(self._files.ebuild.read())
3086                         if not self._raw_metadata[-1]:
3087                                 self._unregister()
3088                                 self.wait()
3089
3090                 self._unregister_if_appropriate(event)
3091                 return self._registered
3092
3093         def _set_returncode(self, wait_retval):
3094                 SubProcess._set_returncode(self, wait_retval)
3095                 if self.returncode == os.EX_OK:
3096                         metadata_lines = "".join(self._raw_metadata).splitlines()
3097                         if len(portage.auxdbkeys) != len(metadata_lines):
3098                                 # Don't trust bash's returncode if the
3099                                 # number of lines is incorrect.
3100                                 self.returncode = 1
3101                         else:
3102                                 metadata = izip(portage.auxdbkeys, metadata_lines)
3103                                 self.metadata_callback(self.cpv, self.ebuild_path,
3104                                         self.repo_path, metadata, self.ebuild_mtime)
3105
3106 class EbuildProcess(SpawnProcess):
3107
3108         __slots__ = ("phase", "pkg", "settings", "tree")
3109
3110         def _start(self):
3111                 # Don't open the log file during the clean phase since the
3112                 # open file can result in an NFS lock on $T/build.log which
3113                 # prevents the clean phase from removing $T.
3114                 if self.phase not in ("clean", "cleanrm"):
3115                         self.logfile = self.settings.get("PORTAGE_LOG_FILE")
3116                 SpawnProcess._start(self)
3117
3118         def _pipe(self, fd_pipes):
3119                 stdout_pipe = fd_pipes.get(1)
3120                 got_pty, master_fd, slave_fd = \
3121                         portage._create_pty_or_pipe(copy_term_size=stdout_pipe)
3122                 return (master_fd, slave_fd)
3123
3124         def _spawn(self, args, **kwargs):
3125
3126                 root_config = self.pkg.root_config
3127                 tree = self.tree
3128                 mydbapi = root_config.trees[tree].dbapi
3129                 settings = self.settings
3130                 ebuild_path = settings["EBUILD"]
3131                 debug = settings.get("PORTAGE_DEBUG") == "1"
3132
3133                 rval = portage.doebuild(ebuild_path, self.phase,
3134                         root_config.root, settings, debug,
3135                         mydbapi=mydbapi, tree=tree, **kwargs)
3136
3137                 return rval
3138
3139         def _set_returncode(self, wait_retval):
3140                 SpawnProcess._set_returncode(self, wait_retval)
3141
3142                 if self.phase not in ("clean", "cleanrm"):
3143                         self.returncode = portage._doebuild_exit_status_check_and_log(
3144                                 self.settings, self.phase, self.returncode)
3145
3146                 if self.phase == "test" and self.returncode != os.EX_OK and \
3147                         "test-fail-continue" in self.settings.features:
3148                         self.returncode = os.EX_OK
3149
3150                 portage._post_phase_userpriv_perms(self.settings)
3151
3152 class EbuildPhase(CompositeTask):
3153
3154         __slots__ = ("background", "pkg", "phase",
3155                 "scheduler", "settings", "tree")
3156
3157         _post_phase_cmds = portage._post_phase_cmds
3158
3159         def _start(self):
3160
3161                 ebuild_process = EbuildProcess(background=self.background,
3162                         pkg=self.pkg, phase=self.phase, scheduler=self.scheduler,
3163                         settings=self.settings, tree=self.tree)
3164
3165                 self._start_task(ebuild_process, self._ebuild_exit)
3166
3167         def _ebuild_exit(self, ebuild_process):
3168
3169                 if self.phase == "install":
3170                         out = None
3171                         log_path = self.settings.get("PORTAGE_LOG_FILE")
3172                         log_file = None
3173                         if self.background and log_path is not None:
3174                                 log_file = open(log_path, 'a')
3175                                 out = log_file
3176                         try:
3177                                 portage._check_build_log(self.settings, out=out)
3178                         finally:
3179                                 if log_file is not None:
3180                                         log_file.close()
3181
3182                 if self._default_exit(ebuild_process) != os.EX_OK:
3183                         self.wait()
3184                         return
3185
3186                 settings = self.settings
3187
3188                 if self.phase == "install":
3189                         portage._post_src_install_uid_fix(settings)
3190
3191                 post_phase_cmds = self._post_phase_cmds.get(self.phase)
3192                 if post_phase_cmds is not None:
3193                         post_phase = MiscFunctionsProcess(background=self.background,
3194                                 commands=post_phase_cmds, phase=self.phase, pkg=self.pkg,
3195                                 scheduler=self.scheduler, settings=settings)
3196                         self._start_task(post_phase, self._post_phase_exit)
3197                         return
3198
3199                 self.returncode = ebuild_process.returncode
3200                 self._current_task = None
3201                 self.wait()
3202
3203         def _post_phase_exit(self, post_phase):
3204                 if self._final_exit(post_phase) != os.EX_OK:
3205                         writemsg("!!! post %s failed; exiting.\n" % self.phase,
3206                                 noiselevel=-1)
3207                 self._current_task = None
3208                 self.wait()
3209                 return
3210
3211 class EbuildBinpkg(EbuildProcess):
3212         """
3213         This assumes that src_install() has successfully completed.
3214         """
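        # The "package" phase bundles the installed image into a temporary
        # ${PKGDIR}/<cpv>.tbz2.<pid> file, which _set_returncode() injects
        # into the binary package tree on success.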
3215         __slots__ = ("_binpkg_tmpfile",)
3216
3217         def _start(self):
3218                 self.phase = "package"
3219                 self.tree = "porttree"
3220                 pkg = self.pkg
3221                 root_config = pkg.root_config
3222                 portdb = root_config.trees["porttree"].dbapi
3223                 bintree = root_config.trees["bintree"]
3224                 ebuild_path = portdb.findname(self.pkg.cpv)
3225                 settings = self.settings
3226                 debug = settings.get("PORTAGE_DEBUG") == "1"
3227
3228                 bintree.prevent_collision(pkg.cpv)
3229                 binpkg_tmpfile = os.path.join(bintree.pkgdir,
3230                         pkg.cpv + ".tbz2." + str(os.getpid()))
3231                 self._binpkg_tmpfile = binpkg_tmpfile
3232                 settings["PORTAGE_BINPKG_TMPFILE"] = binpkg_tmpfile
3233                 settings.backup_changes("PORTAGE_BINPKG_TMPFILE")
3234
3235                 try:
3236                         EbuildProcess._start(self)
3237                 finally:
3238                         settings.pop("PORTAGE_BINPKG_TMPFILE", None)
3239
3240         def _set_returncode(self, wait_retval):
3241                 EbuildProcess._set_returncode(self, wait_retval)
3242
3243                 pkg = self.pkg
3244                 bintree = pkg.root_config.trees["bintree"]
3245                 binpkg_tmpfile = self._binpkg_tmpfile
3246                 if self.returncode == os.EX_OK:
3247                         bintree.inject(pkg.cpv, filename=binpkg_tmpfile)
3248
3249 class EbuildMerge(SlotObject):
3250
3251         __slots__ = ("find_blockers", "logger", "ldpath_mtimes",
3252                 "pkg", "pkg_count", "pkg_path", "pretend",
3253                 "scheduler", "settings", "tree", "world_atom")
3254
3255         def execute(self):
3256                 root_config = self.pkg.root_config
3257                 settings = self.settings
3258                 retval = portage.merge(settings["CATEGORY"],
3259                         settings["PF"], settings["D"],
3260                         os.path.join(settings["PORTAGE_BUILDDIR"],
3261                         "build-info"), root_config.root, settings,
3262                         myebuild=settings["EBUILD"],
3263                         mytree=self.tree, mydbapi=root_config.trees[self.tree].dbapi,
3264                         vartree=root_config.trees["vartree"],
3265                         prev_mtimes=self.ldpath_mtimes,
3266                         scheduler=self.scheduler,
3267                         blockers=self.find_blockers)
3268
3269                 if retval == os.EX_OK:
3270                         self.world_atom(self.pkg)
3271                         self._log_success()
3272
3273                 return retval
3274
3275         def _log_success(self):
3276                 pkg = self.pkg
3277                 pkg_count = self.pkg_count
3278                 pkg_path = self.pkg_path
3279                 logger = self.logger
3280                 if "noclean" not in self.settings.features:
3281                         short_msg = "emerge: (%s of %s) %s Clean Post" % \
3282                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3283                         logger.log((" === (%s of %s) " + \
3284                                 "Post-Build Cleaning (%s::%s)") % \
3285                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path),
3286                                 short_msg=short_msg)
3287                 logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
3288                         (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
3289
3290 class PackageUninstall(AsynchronousTask):
3291
3292         __slots__ = ("ldpath_mtimes", "opts", "pkg", "scheduler", "settings")
3293
3294         def _start(self):
3295                 try:
3296                         unmerge(self.pkg.root_config, self.opts, "unmerge",
3297                                 [self.pkg.cpv], self.ldpath_mtimes, clean_world=0,
3298                                 clean_delay=0, raise_on_error=1, scheduler=self.scheduler,
3299                                 writemsg_level=self._writemsg_level)
3300                 except UninstallFailure, e:
3301                         self.returncode = e.status
3302                 else:
3303                         self.returncode = os.EX_OK
3304                 self.wait()
3305
3306         def _writemsg_level(self, msg, level=0, noiselevel=0):
3307
3308                 log_path = self.settings.get("PORTAGE_LOG_FILE")
3309                 background = self.background
3310
3311                 if log_path is None:
3312                         if not (background and level < logging.WARNING):
3313                                 portage.util.writemsg_level(msg,
3314                                         level=level, noiselevel=noiselevel)
3315                 else:
3316                         if not background:
3317                                 portage.util.writemsg_level(msg,
3318                                         level=level, noiselevel=noiselevel)
3319
3320                         f = open(log_path, 'a')
3321                         try:
3322                                 f.write(msg)
3323                         finally:
3324                                 f.close()
3325
3326 class Binpkg(CompositeTask):
3327
3328         __slots__ = ("find_blockers",
3329                 "ldpath_mtimes", "logger", "opts",
3330                 "pkg", "pkg_count", "prefetcher", "settings", "world_atom") + \
3331                 ("_bintree", "_build_dir", "_ebuild_path", "_fetched_pkg",
3332                 "_image_dir", "_infloc", "_pkg_path", "_tree", "_verify")
3333
3334         def _writemsg_level(self, msg, level=0, noiselevel=0):
3335
3336                 if not self.background:
3337                         portage.util.writemsg_level(msg,
3338                                 level=level, noiselevel=noiselevel)
3339
3340                 log_path = self.settings.get("PORTAGE_LOG_FILE")
3341                 if log_path is not None:
3342                         f = open(log_path, 'a')
3343                         try:
3344                                 f.write(msg)
3345                         finally:
3346                                 f.close()
3347
3348         def _start(self):
3349
3350                 pkg = self.pkg
3351                 settings = self.settings
3352                 settings.setcpv(pkg)
3353                 self._tree = "bintree"
3354                 self._bintree = self.pkg.root_config.trees[self._tree]
3355                 self._verify = not self.opts.pretend
3356
3357                 dir_path = os.path.join(settings["PORTAGE_TMPDIR"],
3358                         "portage", pkg.category, pkg.pf)
3359                 self._build_dir = EbuildBuildDir(dir_path=dir_path,
3360                         pkg=pkg, settings=settings)
3361                 self._image_dir = os.path.join(dir_path, "image")
3362                 self._infloc = os.path.join(dir_path, "build-info")
3363                 self._ebuild_path = os.path.join(self._infloc, pkg.pf + ".ebuild")
3364                 settings["EBUILD"] = self._ebuild_path
3365                 debug = settings.get("PORTAGE_DEBUG") == "1"
3366                 portage.doebuild_environment(self._ebuild_path, "setup",
3367                         settings["ROOT"], settings, debug, 1, self._bintree.dbapi)
3368                 settings.configdict["pkg"]["EMERGE_FROM"] = pkg.type_name
3369
3370                 # The prefetcher has already completed or it
3371                 # could be running now. If it's running now,
3372                 # wait for it to complete since it holds
3373                 # a lock on the file being fetched. The
3374                 # portage.locks functions are only designed
3375                 # to work between separate processes. Since
3376                 # the lock is held by the current process,
3377                 # use the scheduler and fetcher methods to
3378                 # synchronize with the fetcher.
3379                 prefetcher = self.prefetcher
3380                 if prefetcher is None:
3381                         pass
3382                 elif not prefetcher.isAlive():
3383                         prefetcher.cancel()
3384                 elif prefetcher.poll() is None:
3385
3386                         waiting_msg = ("Fetching '%s' " + \
3387                                 "in the background. " + \
3388                                 "To view fetch progress, run `tail -f " + \
3389                                 "/var/log/emerge-fetch.log` in another " + \
3390                                 "terminal.") % prefetcher.pkg_path
3391                         msg_prefix = colorize("GOOD", " * ")
3392                         from textwrap import wrap
3393                         waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
3394                                 for line in wrap(waiting_msg, 65))
3395                         if not self.background:
3396                                 writemsg(waiting_msg, noiselevel=-1)
3397
3398                         self._current_task = prefetcher
3399                         prefetcher.addExitListener(self._prefetch_exit)
3400                         return
3401
3402                 self._prefetch_exit(prefetcher)
3403
3404         def _prefetch_exit(self, prefetcher):
3405
3406                 pkg = self.pkg
3407                 pkg_count = self.pkg_count
3408                 if not (self.opts.pretend or self.opts.fetchonly):
3409                         self._build_dir.lock()
3410                         try:
3411                                 shutil.rmtree(self._build_dir.dir_path)
3412                         except EnvironmentError, e:
3413                                 if e.errno != errno.ENOENT:
3414                                         raise
3415                                 del e
3416                         portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
3417                 fetcher = BinpkgFetcher(background=self.background,
3418                         logfile=self.settings.get("PORTAGE_LOG_FILE"), pkg=self.pkg,
3419                         pretend=self.opts.pretend, scheduler=self.scheduler)
3420                 pkg_path = fetcher.pkg_path
3421                 self._pkg_path = pkg_path
3422
3423                 if self.opts.getbinpkg and self._bintree.isremote(pkg.cpv):
3424
3425                         msg = " --- (%s of %s) Fetching Binary (%s::%s)" %\
3426                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
3427                         short_msg = "emerge: (%s of %s) %s Fetch" % \
3428                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3429                         self.logger.log(msg, short_msg=short_msg)
3430                         self._start_task(fetcher, self._fetcher_exit)
3431                         return
3432
3433                 self._fetcher_exit(fetcher)
3434
3435         def _fetcher_exit(self, fetcher):
3436
3437                 # The fetcher only has a returncode when
3438                 # --getbinpkg is enabled.
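                # When --getbinpkg does not apply, _prefetch_exit() passes the
                # fetcher here without ever starting it, so its returncode is
                # still None.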
3439                 if fetcher.returncode is not None:
3440                         self._fetched_pkg = True
3441                         if self._default_exit(fetcher) != os.EX_OK:
3442                                 self._unlock_builddir()
3443                                 self.wait()
3444                                 return
3445
3446                 if self.opts.pretend:
3447                         self._current_task = None
3448                         self.returncode = os.EX_OK
3449                         self.wait()
3450                         return
3451
3452                 verifier = None
3453                 if self._verify:
3454                         logfile = None
3455                         if self.background:
3456                                 logfile = self.settings.get("PORTAGE_LOG_FILE")
3457                         verifier = BinpkgVerifier(background=self.background,
3458                                 logfile=logfile, pkg=self.pkg)
3459                         self._start_task(verifier, self._verifier_exit)
3460                         return
3461
3462                 self._verifier_exit(verifier)
3463
3464         def _verifier_exit(self, verifier):
3465                 if verifier is not None and \
3466                         self._default_exit(verifier) != os.EX_OK:
3467                         self._unlock_builddir()
3468                         self.wait()
3469                         return
3470
3471                 logger = self.logger
3472                 pkg = self.pkg
3473                 pkg_count = self.pkg_count
3474                 pkg_path = self._pkg_path
3475
3476                 if self._fetched_pkg:
3477                         self._bintree.inject(pkg.cpv, filename=pkg_path)
3478
3479                 if self.opts.fetchonly:
3480                         self._current_task = None
3481                         self.returncode = os.EX_OK
3482                         self.wait()
3483                         return
3484
3485                 msg = " === (%s of %s) Merging Binary (%s::%s)" % \
3486                         (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
3487                 short_msg = "emerge: (%s of %s) %s Merge Binary" % \
3488                         (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3489                 logger.log(msg, short_msg=short_msg)
3490
3491                 phase = "clean"
3492                 settings = self.settings
3493                 ebuild_phase = EbuildPhase(background=self.background,
3494                         pkg=pkg, phase=phase, scheduler=self.scheduler,
3495                         settings=settings, tree=self._tree)
3496
3497                 self._start_task(ebuild_phase, self._clean_exit)
3498
3499         def _clean_exit(self, clean_phase):
3500                 if self._default_exit(clean_phase) != os.EX_OK:
3501                         self._unlock_builddir()
3502                         self.wait()
3503                         return
3504
3505                 dir_path = self._build_dir.dir_path
3506
3507                 try:
3508                         shutil.rmtree(dir_path)
3509                 except (IOError, OSError), e:
3510                         if e.errno != errno.ENOENT:
3511                                 raise
3512                         del e
3513
3514                 infloc = self._infloc
3515                 pkg = self.pkg
3516                 pkg_path = self._pkg_path
3517
3518                 dir_mode = 0755
3519                 for mydir in (dir_path, self._image_dir, infloc):
3520                         portage.util.ensure_dirs(mydir, uid=portage.data.portage_uid,
3521                                 gid=portage.data.portage_gid, mode=dir_mode)
3522
3523                 # This initializes PORTAGE_LOG_FILE.
3524                 portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
3525                 self._writemsg_level(">>> Extracting info\n")
3526
3527                 pkg_xpak = portage.xpak.tbz2(self._pkg_path)
3528                 check_missing_metadata = ("CATEGORY", "PF")
3529                 missing_metadata = set()
3530                 for k in check_missing_metadata:
3531                         v = pkg_xpak.getfile(k)
3532                         if not v:
3533                                 missing_metadata.add(k)
3534
3535                 pkg_xpak.unpackinfo(infloc)
3536                 for k in missing_metadata:
3537                         if k == "CATEGORY":
3538                                 v = pkg.category
3539                         elif k == "PF":
3540                                 v = pkg.pf
3541                         else:
3542                                 continue
3543
3544                         f = open(os.path.join(infloc, k), 'wb')
3545                         try:
3546                                 f.write(v + "\n")
3547                         finally:
3548                                 f.close()
3549
3550                 # Store the md5sum in the vdb.
3551                 f = open(os.path.join(infloc, "BINPKGMD5"), "w")
3552                 try:
3553                         f.write(str(portage.checksum.perform_md5(pkg_path)) + "\n")
3554                 finally:
3555                         f.close()
3556
3557                 # This gives bashrc users an opportunity to do various things
3558                 # such as remove binary packages after they're installed.
3559                 settings = self.settings
3560                 settings.setcpv(self.pkg)
3561                 settings["PORTAGE_BINPKG_FILE"] = pkg_path
3562                 settings.backup_changes("PORTAGE_BINPKG_FILE")
3563
3564                 phase = "setup"
3565                 setup_phase = EbuildPhase(background=self.background,
3566                         pkg=self.pkg, phase=phase, scheduler=self.scheduler,
3567                         settings=settings, tree=self._tree)
3568
3569                 setup_phase.addExitListener(self._setup_exit)
3570                 self._current_task = setup_phase
3571                 self.scheduler.scheduleSetup(setup_phase)
3572
3573         def _setup_exit(self, setup_phase):
3574                 if self._default_exit(setup_phase) != os.EX_OK:
3575                         self._unlock_builddir()
3576                         self.wait()
3577                         return
3578
3579                 extractor = BinpkgExtractorAsync(background=self.background,
3580                         image_dir=self._image_dir,
3581                         pkg=self.pkg, pkg_path=self._pkg_path, scheduler=self.scheduler)
3582                 self._writemsg_level(">>> Extracting %s\n" % self.pkg.cpv)
3583                 self._start_task(extractor, self._extractor_exit)
3584
3585         def _extractor_exit(self, extractor):
3586                 if self._final_exit(extractor) != os.EX_OK:
3587                         self._unlock_builddir()
3588                         writemsg("!!! Error Extracting '%s'\n" % self._pkg_path,
3589                                 noiselevel=-1)
3590                 self.wait()
3591
3592         def _unlock_builddir(self):
3593                 if self.opts.pretend or self.opts.fetchonly:
3594                         return
3595                 portage.elog.elog_process(self.pkg.cpv, self.settings)
3596                 self._build_dir.unlock()
3597
3598         def install(self):
3599
3600                 # This gives bashrc users an opportunity to do various things
3601                 # such as remove binary packages after they're installed.
3602                 settings = self.settings
3603                 settings["PORTAGE_BINPKG_FILE"] = self._pkg_path
3604                 settings.backup_changes("PORTAGE_BINPKG_FILE")
3605
3606                 merge = EbuildMerge(find_blockers=self.find_blockers,
3607                         ldpath_mtimes=self.ldpath_mtimes, logger=self.logger,
3608                         pkg=self.pkg, pkg_count=self.pkg_count,
3609                         pkg_path=self._pkg_path, scheduler=self.scheduler,
3610                         settings=settings, tree=self._tree, world_atom=self.world_atom)
3611
3612                 try:
3613                         retval = merge.execute()
3614                 finally:
3615                         settings.pop("PORTAGE_BINPKG_FILE", None)
3616                         self._unlock_builddir()
3617                 return retval
3618
3619 class BinpkgFetcher(SpawnProcess):
3620
3621         __slots__ = ("pkg", "pretend",
3622                 "locked", "pkg_path", "_lock_obj")
3623
3624         def __init__(self, **kwargs):
3625                 SpawnProcess.__init__(self, **kwargs)
3626                 pkg = self.pkg
3627                 self.pkg_path = pkg.root_config.trees["bintree"].getname(pkg.cpv)
3628
3629         def _start(self):
3630
3631                 if self.cancelled:
3632                         return
3633
3634                 pkg = self.pkg
3635                 pretend = self.pretend
3636                 bintree = pkg.root_config.trees["bintree"]
3637                 settings = bintree.settings
3638                 use_locks = "distlocks" in settings.features
3639                 pkg_path = self.pkg_path
3640
3641                 if not pretend:
3642                         portage.util.ensure_dirs(os.path.dirname(pkg_path))
3643                         if use_locks:
3644                                 self.lock()
3645                 exists = os.path.exists(pkg_path)
3646                 resume = exists and os.path.basename(pkg_path) in bintree.invalids
3647                 if not (pretend or resume):
3648                         # Remove existing file or broken symlink.
3649                         try:
3650                                 os.unlink(pkg_path)
3651                         except OSError:
3652                                 pass
3653
3654                 # urljoin doesn't work correctly with unrecognized
3655                 # protocols like sftp, so construct the URI manually.
3656                 if bintree._remote_has_index:
3657                         rel_uri = bintree._remotepkgs[pkg.cpv].get("PATH")
3658                         if not rel_uri:
3659                                 rel_uri = pkg.cpv + ".tbz2"
3660                         uri = bintree._remote_base_uri.rstrip("/") + \
3661                                 "/" + rel_uri.lstrip("/")
3662                 else:
3663                         uri = settings["PORTAGE_BINHOST"].rstrip("/") + \
3664                                 "/" + pkg.pf + ".tbz2"
3665
3666                 if pretend:
3667                         portage.writemsg_stdout("\n%s\n" % uri, noiselevel=-1)
3668                         self.returncode = os.EX_OK
3669                         self.wait()
3670                         return
3671
3672                 protocol = urlparse.urlparse(uri)[0]
3673                 fcmd_prefix = "FETCHCOMMAND"
3674                 if resume:
3675                         fcmd_prefix = "RESUMECOMMAND"
3676                 fcmd = settings.get(fcmd_prefix + "_" + protocol.upper())
3677                 if not fcmd:
3678                         fcmd = settings.get(fcmd_prefix)
3679
3680                 fcmd_vars = {
3681                         "DISTDIR" : os.path.dirname(pkg_path),
3682                         "URI"     : uri,
3683                         "FILE"    : os.path.basename(pkg_path)
3684                 }
3685
3686                 fetch_env = dict(settings.iteritems())
3687                 fetch_args = [portage.util.varexpand(x, mydict=fcmd_vars) \
3688                         for x in shlex.split(fcmd)]
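                # Illustrative sketch (not taken from a live configuration): with a
                # hypothetical FETCHCOMMAND value such as
                #   wget -O "${DISTDIR}/${FILE}" "${URI}"
                # the shlex.split() pass above yields the quoted words, and varexpand()
                # with fcmd_vars then substitutes the placeholders, producing an
                # argument list roughly like
                #   ['wget', '-O', '/path/to/packages/All/foo-1.0.tbz2',
                #    'http://binhost.example/foo-1.0.tbz2']
                # where DISTDIR, FILE and URI come from the fcmd_vars dict above.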
3689
3690                 if self.fd_pipes is None:
3691                         self.fd_pipes = {}
3692                 fd_pipes = self.fd_pipes
3693
3694                 # Redirect all output to stdout since some fetchers like
3695                 # wget pollute stderr (if portage detects a problem then it
3696                 # can send its own message to stderr).
3697                 fd_pipes.setdefault(0, sys.stdin.fileno())
3698                 fd_pipes.setdefault(1, sys.stdout.fileno())
3699                 fd_pipes.setdefault(2, sys.stdout.fileno())
3700
3701                 self.args = fetch_args
3702                 self.env = fetch_env
3703                 SpawnProcess._start(self)
3704
3705         def _set_returncode(self, wait_retval):
3706                 SpawnProcess._set_returncode(self, wait_retval)
3707                 if self.returncode == os.EX_OK:
3708                         # If possible, update the mtime to match the remote package if
3709                         # the fetcher didn't already do it automatically.
3710                         bintree = self.pkg.root_config.trees["bintree"]
3711                         if bintree._remote_has_index:
3712                                 remote_mtime = bintree._remotepkgs[self.pkg.cpv].get("MTIME")
3713                                 if remote_mtime is not None:
3714                                         try:
3715                                                 remote_mtime = long(remote_mtime)
3716                                         except ValueError:
3717                                                 pass
3718                                         else:
3719                                                 try:
3720                                                         local_mtime = long(os.stat(self.pkg_path).st_mtime)
3721                                                 except OSError:
3722                                                         pass
3723                                                 else:
3724                                                         if remote_mtime != local_mtime:
3725                                                                 try:
3726                                                                         os.utime(self.pkg_path,
3727                                                                                 (remote_mtime, remote_mtime))
3728                                                                 except OSError:
3729                                                                         pass
3730
3731                 if self.locked:
3732                         self.unlock()
3733
3734         def lock(self):
3735                 """
3736                 This raises an AlreadyLocked exception if lock() is called
3737                 while a lock is already held. In order to avoid this, call
3738                 unlock() or check whether the "locked" attribute is True
3739                 or False before calling lock().
3740                 """
3741                 if self._lock_obj is not None:
3742                         raise self.AlreadyLocked((self._lock_obj,))
3743
3744                 self._lock_obj = portage.locks.lockfile(
3745                         self.pkg_path, wantnewlockfile=1)
3746                 self.locked = True
3747
3748         class AlreadyLocked(portage.exception.PortageException):
3749                 pass
3750
3751         def unlock(self):
3752                 if self._lock_obj is None:
3753                         return
3754                 portage.locks.unlockfile(self._lock_obj)
3755                 self._lock_obj = None
3756                 self.locked = False
3757
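# A minimal usage sketch for the locking API above (variable names are
# hypothetical). As the lock() docstring notes, callers should consult the
# "locked" attribute, or call unlock(), before calling lock() again, since a
# second lock() raises AlreadyLocked:
#
#   fetcher = BinpkgFetcher(pkg=pkg, scheduler=scheduler)
#   if not fetcher.locked:
#       fetcher.lock()
#   try:
#       ...  # download into fetcher.pkg_path
#   finally:
#       fetcher.unlock()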
3758 class BinpkgVerifier(AsynchronousTask):
3759         __slots__ = ("logfile", "pkg",)
3760
3761         def _start(self):
3762                 """
3763                 Note: Unlike a normal AsynchronousTask.start() method,
3764                 this one does all of its work synchronously. The returncode
3765                 attribute will be set before it returns.
3766                 """
3767
3768                 pkg = self.pkg
3769                 root_config = pkg.root_config
3770                 bintree = root_config.trees["bintree"]
3771                 rval = os.EX_OK
3772                 stdout_orig = sys.stdout
3773                 stderr_orig = sys.stderr
3774                 log_file = None
3775                 if self.background and self.logfile is not None:
3776                         log_file = open(self.logfile, 'a')
3777                 try:
3778                         if log_file is not None:
3779                                 sys.stdout = log_file
3780                                 sys.stderr = log_file
3781                         try:
3782                                 bintree.digestCheck(pkg)
3783                         except portage.exception.FileNotFound:
3784                                 writemsg("!!! Fetching Binary failed " + \
3785                                         "for '%s'\n" % pkg.cpv, noiselevel=-1)
3786                                 rval = 1
3787                         except portage.exception.DigestException, e:
3788                                 writemsg("\n!!! Digest verification failed:\n",
3789                                         noiselevel=-1)
3790                                 writemsg("!!! %s\n" % e.value[0],
3791                                         noiselevel=-1)
3792                                 writemsg("!!! Reason: %s\n" % e.value[1],
3793                                         noiselevel=-1)
3794                                 writemsg("!!! Got: %s\n" % e.value[2],
3795                                         noiselevel=-1)
3796                                 writemsg("!!! Expected: %s\n" % e.value[3],
3797                                         noiselevel=-1)
3798                                 rval = 1
3799                         if rval != os.EX_OK:
3800                                 pkg_path = bintree.getname(pkg.cpv)
3801                                 head, tail = os.path.split(pkg_path)
3802                                 temp_filename = portage._checksum_failure_temp_file(head, tail)
3803                                 writemsg("File renamed to '%s'\n" % (temp_filename,),
3804                                         noiselevel=-1)
3805                 finally:
3806                         sys.stdout = stdout_orig
3807                         sys.stderr = stderr_orig
3808                         if log_file is not None:
3809                                 log_file.close()
3810
3811                 self.returncode = rval
3812                 self.wait()
3813
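# Because BinpkgVerifier._start() runs synchronously, a caller that is not
# wrapping it in a CompositeTask can start it and read the returncode
# immediately (illustrative sketch, variable names hypothetical):
#
#   verifier = BinpkgVerifier(background=False, pkg=pkg, logfile=None)
#   verifier.start()
#   if verifier.returncode != os.EX_OK:
#       ...  # digest verification failed; the file was renamed aside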
3814 class BinpkgPrefetcher(CompositeTask):
3815
3816         __slots__ = ("pkg",) + \
3817                 ("pkg_path", "_bintree",)
3818
3819         def _start(self):
3820                 self._bintree = self.pkg.root_config.trees["bintree"]
3821                 fetcher = BinpkgFetcher(background=self.background,
3822                         logfile=self.scheduler.fetch.log_file, pkg=self.pkg,
3823                         scheduler=self.scheduler)
3824                 self.pkg_path = fetcher.pkg_path
3825                 self._start_task(fetcher, self._fetcher_exit)
3826
3827         def _fetcher_exit(self, fetcher):
3828
3829                 if self._default_exit(fetcher) != os.EX_OK:
3830                         self.wait()
3831                         return
3832
3833                 verifier = BinpkgVerifier(background=self.background,
3834                         logfile=self.scheduler.fetch.log_file, pkg=self.pkg)
3835                 self._start_task(verifier, self._verifier_exit)
3836
3837         def _verifier_exit(self, verifier):
3838                 if self._default_exit(verifier) != os.EX_OK:
3839                         self.wait()
3840                         return
3841
3842                 self._bintree.inject(self.pkg.cpv, filename=self.pkg_path)
3843
3844                 self._current_task = None
3845                 self.returncode = os.EX_OK
3846                 self.wait()
3847
3848 class BinpkgExtractorAsync(SpawnProcess):
3849
3850         __slots__ = ("image_dir", "pkg", "pkg_path")
3851
3852         _shell_binary = portage.const.BASH_BINARY
3853
3854         def _start(self):
3855                 self.args = [self._shell_binary, "-c",
3856                         "bzip2 -dqc -- %s | tar -xp -C %s -f -" % \
3857                         (portage._shell_quote(self.pkg_path),
3858                         portage._shell_quote(self.image_dir))]
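                # For example (paths hypothetical), the command assembled above is
                # equivalent to running:
                #   bash -c "bzip2 -dqc -- '/pkgdir/foo-1.0.tbz2' | \
                #       tar -xp -C '/var/tmp/portage/cat/foo-1.0/image' -f -"
                # i.e. the binary package is decompressed and unpacked directly
                # into the temporary image directory.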
3859
3860                 self.env = self.pkg.root_config.settings.environ()
3861                 SpawnProcess._start(self)
3862
3863 class MergeListItem(CompositeTask):
3864
3865         """
3866         TODO: For parallel scheduling, everything here needs asynchronous
3867         execution support (start, poll, and wait methods).
3868         """
3869
3870         __slots__ = ("args_set",
3871                 "binpkg_opts", "build_opts", "config_pool", "emerge_opts",
3872                 "find_blockers", "logger", "mtimedb", "pkg",
3873                 "pkg_count", "pkg_to_replace", "prefetcher",
3874                 "settings", "statusMessage", "world_atom") + \
3875                 ("_install_task",)
3876
3877         def _start(self):
3878
3879                 pkg = self.pkg
3880                 build_opts = self.build_opts
3881
3882                 if pkg.installed:
3883                         # uninstall, executed by self.merge()
3884                         self.returncode = os.EX_OK
3885                         self.wait()
3886                         return
3887
3888                 args_set = self.args_set
3889                 find_blockers = self.find_blockers
3890                 logger = self.logger
3891                 mtimedb = self.mtimedb
3892                 pkg_count = self.pkg_count
3893                 scheduler = self.scheduler
3894                 settings = self.settings
3895                 world_atom = self.world_atom
3896                 ldpath_mtimes = mtimedb["ldpath"]
3897
3898                 action_desc = "Emerging"
3899                 preposition = "for"
3900                 if pkg.type_name == "binary":
3901                         action_desc += " binary"
3902
3903                 if build_opts.fetchonly:
3904                         action_desc = "Fetching"
3905
3906                 msg = "%s (%s of %s) %s" % \
3907                         (action_desc,
3908                         colorize("MERGE_LIST_PROGRESS", str(pkg_count.curval)),
3909                         colorize("MERGE_LIST_PROGRESS", str(pkg_count.maxval)),
3910                         colorize("GOOD", pkg.cpv))
3911
3912                 portdb = pkg.root_config.trees["porttree"].dbapi
3913                 portdir_repo_name = portdb._repository_map.get(portdb.porttree_root)
3914                 if portdir_repo_name:
3915                         pkg_repo_name = pkg.metadata.get("repository")
3916                         if pkg_repo_name != portdir_repo_name:
3917                                 if not pkg_repo_name:
3918                                         pkg_repo_name = "unknown repo"
3919                                 msg += " from %s" % pkg_repo_name
3920
3921                 if pkg.root != "/":
3922                         msg += " %s %s" % (preposition, pkg.root)
3923
3924                 if not build_opts.pretend:
3925                         self.statusMessage(msg)
3926                         logger.log(" >>> emerge (%s of %s) %s to %s" % \
3927                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
3928
3929                 if pkg.type_name == "ebuild":
3930
3931                         build = EbuildBuild(args_set=args_set,
3932                                 background=self.background,
3933                                 config_pool=self.config_pool,
3934                                 find_blockers=find_blockers,
3935                                 ldpath_mtimes=ldpath_mtimes, logger=logger,
3936                                 opts=build_opts, pkg=pkg, pkg_count=pkg_count,
3937                                 prefetcher=self.prefetcher, scheduler=scheduler,
3938                                 settings=settings, world_atom=world_atom)
3939
3940                         self._install_task = build
3941                         self._start_task(build, self._default_final_exit)
3942                         return
3943
3944                 elif pkg.type_name == "binary":
3945
3946                         binpkg = Binpkg(background=self.background,
3947                                 find_blockers=find_blockers,
3948                                 ldpath_mtimes=ldpath_mtimes, logger=logger,
3949                                 opts=self.binpkg_opts, pkg=pkg, pkg_count=pkg_count,
3950                                 prefetcher=self.prefetcher, settings=settings,
3951                                 scheduler=scheduler, world_atom=world_atom)
3952
3953                         self._install_task = binpkg
3954                         self._start_task(binpkg, self._default_final_exit)
3955                         return
3956
3957         def _poll(self):
3958                 self._install_task.poll()
3959                 return self.returncode
3960
3961         def _wait(self):
3962                 self._install_task.wait()
3963                 return self.returncode
3964
3965         def merge(self):
3966
3967                 pkg = self.pkg
3968                 build_opts = self.build_opts
3969                 find_blockers = self.find_blockers
3970                 logger = self.logger
3971                 mtimedb = self.mtimedb
3972                 pkg_count = self.pkg_count
3973                 prefetcher = self.prefetcher
3974                 scheduler = self.scheduler
3975                 settings = self.settings
3976                 world_atom = self.world_atom
3977                 ldpath_mtimes = mtimedb["ldpath"]
3978
3979                 if pkg.installed:
3980                         if not (build_opts.buildpkgonly or \
3981                                 build_opts.fetchonly or build_opts.pretend):
3982
3983                                 uninstall = PackageUninstall(background=self.background,
3984                                         ldpath_mtimes=ldpath_mtimes, opts=self.emerge_opts,
3985                                         pkg=pkg, scheduler=scheduler, settings=settings)
3986
3987                                 uninstall.start()
3988                                 retval = uninstall.wait()
3989                                 if retval != os.EX_OK:
3990                                         return retval
3991                         return os.EX_OK
3992
3993                 if build_opts.fetchonly or \
3994                         build_opts.buildpkgonly:
3995                         return self.returncode
3996
3997                 retval = self._install_task.install()
3998                 return retval
3999
4000 class PackageMerge(AsynchronousTask):
4001         """
4002         TODO: Implement asynchronous merge so that the scheduler can
4003         run while a merge is executing.
4004         """
4005
4006         __slots__ = ("merge",)
4007
4008         def _start(self):
4009
4010                 pkg = self.merge.pkg
4011                 pkg_count = self.merge.pkg_count
4012
4013                 if pkg.installed:
4014                         action_desc = "Uninstalling"
4015                         preposition = "from"
4016                 else:
4017                         action_desc = "Installing"
4018                         preposition = "to"
4019
4020                 msg = "%s %s" % (action_desc, colorize("GOOD", pkg.cpv))
4021
4022                 if pkg.root != "/":
4023                         msg += " %s %s" % (preposition, pkg.root)
4024
4025                 if not self.merge.build_opts.fetchonly and \
4026                         not self.merge.build_opts.pretend and \
4027                         not self.merge.build_opts.buildpkgonly:
4028                         self.merge.statusMessage(msg)
4029
4030                 self.returncode = self.merge.merge()
4031                 self.wait()
4032
4033 class DependencyArg(object):
4034         def __init__(self, arg=None, root_config=None):
4035                 self.arg = arg
4036                 self.root_config = root_config
4037
4038         def __str__(self):
4039                 return str(self.arg)
4040
4041 class AtomArg(DependencyArg):
4042         def __init__(self, atom=None, **kwargs):
4043                 DependencyArg.__init__(self, **kwargs)
4044                 self.atom = atom
4045                 if not isinstance(self.atom, portage.dep.Atom):
4046                         self.atom = portage.dep.Atom(self.atom)
4047                 self.set = (self.atom, )
4048
4049 class PackageArg(DependencyArg):
4050         def __init__(self, package=None, **kwargs):
4051                 DependencyArg.__init__(self, **kwargs)
4052                 self.package = package
4053                 self.atom = portage.dep.Atom("=" + package.cpv)
4054                 self.set = (self.atom, )
4055
4056 class SetArg(DependencyArg):
4057         def __init__(self, set=None, **kwargs):
4058                 DependencyArg.__init__(self, **kwargs)
4059                 self.set = set
4060                 self.name = self.arg[len(SETPREFIX):]
4061
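# Illustrative sketch of how these argument wrappers behave (values are
# hypothetical; SETPREFIX is assumed to be the set-name prefix imported from
# portage.sets):
#
#   atom_arg = AtomArg(arg=">=dev-lang/python-2.4",
#       atom=">=dev-lang/python-2.4", root_config=root_config)
#   atom_arg.set    # one-element tuple holding the portage.dep.Atom instance
#
#   set_arg = SetArg(arg=SETPREFIX + "world", set=world_set,
#       root_config=root_config)
#   set_arg.name    # "world"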
4062 class Dependency(SlotObject):
4063         __slots__ = ("atom", "blocker", "depth",
4064                 "parent", "onlydeps", "priority", "root")
4065         def __init__(self, **kwargs):
4066                 SlotObject.__init__(self, **kwargs)
4067                 if self.priority is None:
4068                         self.priority = DepPriority()
4069                 if self.depth is None:
4070                         self.depth = 0
4071
4072 class BlockerCache(portage.cache.mappings.MutableMapping):
4073         """This caches blockers of installed packages so that dep_check does not
4074         have to be done for every single installed package on every invocation of
4075         emerge.  The cache is invalidated whenever it is detected that something
4076         has changed that might alter the results of dep_check() calls:
4077                 1) the set of installed packages (including COUNTER) has changed
4078                 2) the old-style virtuals have changed
4079         """
4080
4081         # Number of uncached packages to trigger cache update, since
4082         # it's wasteful to update it for every vdb change.
4083         _cache_threshold = 5
4084
4085         class BlockerData(object):
4086
4087                 __slots__ = ("__weakref__", "atoms", "counter")
4088
4089                 def __init__(self, counter, atoms):
4090                         self.counter = counter
4091                         self.atoms = atoms
4092
4093         def __init__(self, myroot, vardb):
4094                 self._vardb = vardb
4095                 self._virtuals = vardb.settings.getvirtuals()
4096                 self._cache_filename = os.path.join(myroot,
4097                         portage.CACHE_PATH.lstrip(os.path.sep), "vdb_blockers.pickle")
4098                 self._cache_version = "1"
4099                 self._cache_data = None
4100                 self._modified = set()
4101                 self._load()
4102
4103         def _load(self):
4104                 try:
4105                         f = open(self._cache_filename, mode='rb')
4106                         mypickle = pickle.Unpickler(f)
4107                         self._cache_data = mypickle.load()
4108                         f.close()
4109                         del f
4110                 except (IOError, OSError, EOFError, ValueError, pickle.UnpicklingError), e:
4111                         if isinstance(e, pickle.UnpicklingError):
4112                                 writemsg("!!! Error loading '%s': %s\n" % \
4113                                         (self._cache_filename, str(e)), noiselevel=-1)
4114                         del e
4115
4116                 cache_valid = self._cache_data and \
4117                         isinstance(self._cache_data, dict) and \
4118                         self._cache_data.get("version") == self._cache_version and \
4119                         isinstance(self._cache_data.get("blockers"), dict)
4120                 if cache_valid:
4121                         # Validate all the atoms and counters so that
4122                         # corruption is detected as soon as possible.
4123                         invalid_items = set()
4124                         for k, v in self._cache_data["blockers"].iteritems():
4125                                 if not isinstance(k, basestring):
4126                                         invalid_items.add(k)
4127                                         continue
4128                                 try:
4129                                         if portage.catpkgsplit(k) is None:
4130                                                 invalid_items.add(k)
4131                                                 continue
4132                                 except portage.exception.InvalidData:
4133                                         invalid_items.add(k)
4134                                         continue
4135                                 if not isinstance(v, tuple) or \
4136                                         len(v) != 2:
4137                                         invalid_items.add(k)
4138                                         continue
4139                                 counter, atoms = v
4140                                 if not isinstance(counter, (int, long)):
4141                                         invalid_items.add(k)
4142                                         continue
4143                                 if not isinstance(atoms, (list, tuple)):
4144                                         invalid_items.add(k)
4145                                         continue
4146                                 invalid_atom = False
4147                                 for atom in atoms:
4148                                         if not isinstance(atom, basestring):
4149                                                 invalid_atom = True
4150                                                 break
4151                                         if atom[:1] != "!" or \
4152                                                 not portage.isvalidatom(
4153                                                 atom, allow_blockers=True):
4154                                                 invalid_atom = True
4155                                                 break
4156                                 if invalid_atom:
4157                                         invalid_items.add(k)
4158                                         continue
4159
4160                         for k in invalid_items:
4161                                 del self._cache_data["blockers"][k]
4162                         if not self._cache_data["blockers"]:
4163                                 cache_valid = False
4164
4165                 if not cache_valid:
4166                         self._cache_data = {"version":self._cache_version}
4167                         self._cache_data["blockers"] = {}
4168                         self._cache_data["virtuals"] = self._virtuals
4169                 self._modified.clear()
4170
4171         def flush(self):
4172                 """If the current user has permission and the internal blocker cache
4173                 has been updated, save it to disk and mark it unmodified.  This is called
4174                 by emerge after it has processed blockers for all installed packages.
4175                 Currently, the cache is only written if the user has superuser
4176                 privileges (since that's required to obtain a lock), but all users
4177                 have read access and benefit from faster blocker lookups (as long as
4178                 the entire cache is still valid).  The cache is stored as a pickled
4179                 dict object with the following format:
4180
4181                 {
4182                         version : "1",
4183                         "blockers" : {cpv1:(counter,(atom1, atom2...)), cpv2...},
4184                         "virtuals" : vardb.settings.getvirtuals()
4185                 }
4186                 """
4187                 if len(self._modified) >= self._cache_threshold and \
4188                         secpass >= 2:
4189                         try:
4190                                 f = portage.util.atomic_ofstream(self._cache_filename, mode='wb')
4191                                 pickle.dump(self._cache_data, f, -1)
4192                                 f.close()
4193                                 portage.util.apply_secpass_permissions(
4194                                         self._cache_filename, gid=portage.portage_gid, mode=0644)
4195                         except (IOError, OSError), e:
4196                                 pass
4197                         self._modified.clear()
4198
4199         def __setitem__(self, cpv, blocker_data):
4200                 """
4201                 Update the cache and mark it as modified for a future call to
4202                 self.flush().
4203
4204                 @param cpv: Package for which to cache blockers.
4205                 @type cpv: String
4206                 @param blocker_data: An object with counter and atoms attributes.
4207                 @type blocker_data: BlockerData
4208                 """
4209                 self._cache_data["blockers"][cpv] = \
4210                         (blocker_data.counter, tuple(str(x) for x in blocker_data.atoms))
4211                 self._modified.add(cpv)
4212
4213         def __iter__(self):
4214                 if self._cache_data is None:
4215                         # triggered by python-trace
4216                         return iter([])
4217                 return iter(self._cache_data["blockers"])
4218
4219         def __delitem__(self, cpv):
4220                 del self._cache_data["blockers"][cpv]
4221
4222         def __getitem__(self, cpv):
4223                 """
4224                 @rtype: BlockerData
4225                 @returns: An object with counter and atoms attributes.
4226                 """
4227                 return self.BlockerData(*self._cache_data["blockers"][cpv])
4228
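# A minimal usage sketch for BlockerCache (variables hypothetical). It mirrors
# the way BlockerDB.findInstalledBlockers() below refreshes stale entries
# before flushing the cache back to disk:
#
#   blocker_cache = BlockerCache(myroot, vardb)
#   data = blocker_cache.get(cpv)
#   if data is None or data.counter != counter:
#       blocker_cache[cpv] = blocker_cache.BlockerData(counter, blocker_atoms)
#   blocker_cache.flush()   # only persisted with sufficient privileges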
4229 class BlockerDB(object):
4230
4231         def __init__(self, root_config):
4232                 self._root_config = root_config
4233                 self._vartree = root_config.trees["vartree"]
4234                 self._portdb = root_config.trees["porttree"].dbapi
4235
4236                 self._dep_check_trees = None
4237                 self._fake_vartree = None
4238
4239         def _get_fake_vartree(self, acquire_lock=0):
4240                 fake_vartree = self._fake_vartree
4241                 if fake_vartree is None:
4242                         fake_vartree = FakeVartree(self._root_config,
4243                                 acquire_lock=acquire_lock)
4244                         self._fake_vartree = fake_vartree
4245                         self._dep_check_trees = { self._vartree.root : {
4246                                 "porttree"    :  fake_vartree,
4247                                 "vartree"     :  fake_vartree,
4248                         }}
4249                 else:
4250                         fake_vartree.sync(acquire_lock=acquire_lock)
4251                 return fake_vartree
4252
4253         def findInstalledBlockers(self, new_pkg, acquire_lock=0):
4254                 blocker_cache = BlockerCache(self._vartree.root, self._vartree.dbapi)
4255                 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
4256                 settings = self._vartree.settings
4257                 stale_cache = set(blocker_cache)
4258                 fake_vartree = self._get_fake_vartree(acquire_lock=acquire_lock)
4259                 dep_check_trees = self._dep_check_trees
4260                 vardb = fake_vartree.dbapi
4261                 installed_pkgs = list(vardb)
4262
4263                 for inst_pkg in installed_pkgs:
4264                         stale_cache.discard(inst_pkg.cpv)
4265                         cached_blockers = blocker_cache.get(inst_pkg.cpv)
4266                         if cached_blockers is not None and \
4267                                 cached_blockers.counter != long(inst_pkg.metadata["COUNTER"]):
4268                                 cached_blockers = None
4269                         if cached_blockers is not None:
4270                                 blocker_atoms = cached_blockers.atoms
4271                         else:
4272                                 # Use aux_get() to trigger FakeVartree global
4273                                 # updates on *DEPEND when appropriate.
4274                                 depstr = " ".join(vardb.aux_get(inst_pkg.cpv, dep_keys))
4275                                 try:
4276                                         portage.dep._dep_check_strict = False
4277                                         success, atoms = portage.dep_check(depstr,
4278                                                 vardb, settings, myuse=inst_pkg.use.enabled,
4279                                                 trees=dep_check_trees, myroot=inst_pkg.root)
4280                                 finally:
4281                                         portage.dep._dep_check_strict = True
4282                                 if not success:
4283                                         pkg_location = os.path.join(inst_pkg.root,
4284                                                 portage.VDB_PATH, inst_pkg.category, inst_pkg.pf)
4285                                         portage.writemsg("!!! %s/*DEPEND: %s\n" % \
4286                                                 (pkg_location, atoms), noiselevel=-1)
4287                                         continue
4288
4289                                 blocker_atoms = [atom for atom in atoms \
4290                                         if atom.startswith("!")]
4291                                 blocker_atoms.sort()
4292                                 counter = long(inst_pkg.metadata["COUNTER"])
4293                                 blocker_cache[inst_pkg.cpv] = \
4294                                         blocker_cache.BlockerData(counter, blocker_atoms)
4295                 for cpv in stale_cache:
4296                         del blocker_cache[cpv]
4297                 blocker_cache.flush()
4298
4299                 blocker_parents = digraph()
4300                 blocker_atoms = []
4301                 for pkg in installed_pkgs:
4302                         for blocker_atom in blocker_cache[pkg.cpv].atoms:
4303                                 blocker_atom = blocker_atom.lstrip("!")
4304                                 blocker_atoms.append(blocker_atom)
4305                                 blocker_parents.add(blocker_atom, pkg)
4306
4307                 blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
4308                 blocking_pkgs = set()
4309                 for atom in blocker_atoms.iterAtomsForPackage(new_pkg):
4310                         blocking_pkgs.update(blocker_parents.parent_nodes(atom))
4311
4312                 # Check for blockers in the other direction.
4313                 depstr = " ".join(new_pkg.metadata[k] for k in dep_keys)
4314                 try:
4315                         portage.dep._dep_check_strict = False
4316                         success, atoms = portage.dep_check(depstr,
4317                                 vardb, settings, myuse=new_pkg.use.enabled,
4318                                 trees=dep_check_trees, myroot=new_pkg.root)
4319                 finally:
4320                         portage.dep._dep_check_strict = True
4321                 if not success:
4322                         # We should never get this far with invalid deps.
4323                         show_invalid_depstring_notice(new_pkg, depstr, atoms)
4324                         assert False
4325
4326                 blocker_atoms = [atom.lstrip("!") for atom in atoms \
4327                         if atom[:1] == "!"]
4328                 if blocker_atoms:
4329                         blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
4330                         for inst_pkg in installed_pkgs:
4331                                 try:
4332                                         blocker_atoms.iterAtomsForPackage(inst_pkg).next()
4333                                 except (portage.exception.InvalidDependString, StopIteration):
4334                                         continue
4335                                 blocking_pkgs.add(inst_pkg)
4336
4337                 return blocking_pkgs
4338
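# Typical usage of BlockerDB (illustrative sketch, variables hypothetical):
#
#   blocker_db = BlockerDB(root_config)
#   blocking = blocker_db.findInstalledBlockers(new_pkg)
#   if blocking:
#       ...  # new_pkg conflicts with one or more installed packages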
4339 def show_invalid_depstring_notice(parent_node, depstring, error_msg):
4340
4341         msg1 = "\n\n!!! Invalid or corrupt dependency specification: " + \
4342                 "\n\n%s\n\n%s\n\n%s\n\n" % (error_msg, parent_node, depstring)
4343         p_type, p_root, p_key, p_status = parent_node
4344         msg = []
4345         if p_status == "nomerge":
4346                 category, pf = portage.catsplit(p_key)
4347                 pkg_location = os.path.join(p_root, portage.VDB_PATH, category, pf)
4348                 msg.append("Portage is unable to process the dependencies of the ")
4349                 msg.append("'%s' package. " % p_key)
4350                 msg.append("In order to correct this problem, the package ")
4351                 msg.append("should be uninstalled, reinstalled, or upgraded. ")
4352                 msg.append("As a temporary workaround, the --nodeps option can ")
4353                 msg.append("be used to ignore all dependencies.  For reference, ")
4354                 msg.append("the problematic dependencies can be found in the ")
4355                 msg.append("*DEPEND files located in '%s/'." % pkg_location)
4356         else:
4357                 msg.append("This package can not be installed. ")
4358                 msg.append("Please notify the '%s' package maintainer " % p_key)
4359                 msg.append("about this problem.")
4360
4361         msg2 = "".join("%s\n" % line for line in textwrap.wrap("".join(msg), 72))
4362         writemsg_level(msg1 + msg2, level=logging.ERROR, noiselevel=-1)
4363
4364 class PackageVirtualDbapi(portage.dbapi):
4365         """
4366         A dbapi-like interface class that represents the state of the installed
4367         package database as new packages are installed, replacing any packages
4368         that previously existed in the same slot. The main difference between
4369         this class and fakedbapi is that this one uses Package instances
4370         internally (passed in via cpv_inject() and cpv_remove() calls).
4371         """
4372         def __init__(self, settings):
4373                 portage.dbapi.__init__(self)
4374                 self.settings = settings
4375                 self._match_cache = {}
4376                 self._cp_map = {}
4377                 self._cpv_map = {}
4378
4379         def clear(self):
4380                 """
4381                 Remove all packages.
4382                 """
4383                 if self._cpv_map:
4384                         self._clear_cache()
4385                         self._cp_map.clear()
4386                         self._cpv_map.clear()
4387
4388         def copy(self):
4389                 obj = PackageVirtualDbapi(self.settings)
4390                 obj._match_cache = self._match_cache.copy()
4391                 obj._cp_map = self._cp_map.copy()
4392                 for k, v in obj._cp_map.iteritems():
4393                         obj._cp_map[k] = v[:]
4394                 obj._cpv_map = self._cpv_map.copy()
4395                 return obj
4396
4397         def __iter__(self):
4398                 return self._cpv_map.itervalues()
4399
4400         def __contains__(self, item):
4401                 existing = self._cpv_map.get(item.cpv)
4402                 if existing is not None and \
4403                         existing == item:
4404                         return True
4405                 return False
4406
4407         def get(self, item, default=None):
4408                 cpv = getattr(item, "cpv", None)
4409                 if cpv is None:
4410                         if len(item) != 4:
4411                                 return default
4412                         type_name, root, cpv, operation = item
4413
4414                 existing = self._cpv_map.get(cpv)
4415                 if existing is not None and \
4416                         existing == item:
4417                         return existing
4418                 return default
4419
4420         def match_pkgs(self, atom):
4421                 return [self._cpv_map[cpv] for cpv in self.match(atom)]
4422
4423         def _clear_cache(self):
4424                 if self._categories is not None:
4425                         self._categories = None
4426                 if self._match_cache:
4427                         self._match_cache = {}
4428
4429         def match(self, origdep, use_cache=1):
4430                 result = self._match_cache.get(origdep)
4431                 if result is not None:
4432                         return result[:]
4433                 result = portage.dbapi.match(self, origdep, use_cache=use_cache)
4434                 self._match_cache[origdep] = result
4435                 return result[:]
4436
4437         def cpv_exists(self, cpv):
4438                 return cpv in self._cpv_map
4439
4440         def cp_list(self, mycp, use_cache=1):
4441                 cachelist = self._match_cache.get(mycp)
4442                 # cp_list() doesn't expand old-style virtuals
4443                 if cachelist and cachelist[0].startswith(mycp):
4444                         return cachelist[:]
4445                 cpv_list = self._cp_map.get(mycp)
4446                 if cpv_list is None:
4447                         cpv_list = []
4448                 else:
4449                         cpv_list = [pkg.cpv for pkg in cpv_list]
4450                 self._cpv_sort_ascending(cpv_list)
4451                 if not (not cpv_list and mycp.startswith("virtual/")):
4452                         self._match_cache[mycp] = cpv_list
4453                 return cpv_list[:]
4454
4455         def cp_all(self):
4456                 return list(self._cp_map)
4457
4458         def cpv_all(self):
4459                 return list(self._cpv_map)
4460
4461         def cpv_inject(self, pkg):
4462                 cp_list = self._cp_map.get(pkg.cp)
4463                 if cp_list is None:
4464                         cp_list = []
4465                         self._cp_map[pkg.cp] = cp_list
4466                 e_pkg = self._cpv_map.get(pkg.cpv)
4467                 if e_pkg is not None:
4468                         if e_pkg == pkg:
4469                                 return
4470                         self.cpv_remove(e_pkg)
4471                 for e_pkg in cp_list:
4472                         if e_pkg.slot_atom == pkg.slot_atom:
4473                                 if e_pkg == pkg:
4474                                         return
4475                                 self.cpv_remove(e_pkg)
4476                                 break
4477                 cp_list.append(pkg)
4478                 self._cpv_map[pkg.cpv] = pkg
4479                 self._clear_cache()
4480
4481         def cpv_remove(self, pkg):
4482                 old_pkg = self._cpv_map.get(pkg.cpv)
4483                 if old_pkg != pkg:
4484                         raise KeyError(pkg)
4485                 self._cp_map[pkg.cp].remove(pkg)
4486                 del self._cpv_map[pkg.cpv]
4487                 self._clear_cache()
4488
4489         def aux_get(self, cpv, wants):
4490                 metadata = self._cpv_map[cpv].metadata
4491                 return [metadata.get(x, "") for x in wants]
4492
4493         def aux_update(self, cpv, values):
4494                 self._cpv_map[cpv].metadata.update(values)
4495                 self._clear_cache()
4496
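# Illustrative sketch of how PackageVirtualDbapi models the future vdb state
# (variables hypothetical); depgraph.__init__() below seeds its fakedb in the
# same way:
#
#   fakedb = PackageVirtualDbapi(vardb.settings)
#   for pkg in vardb:
#       fakedb.cpv_inject(pkg)          # seed with installed packages
#   fakedb.cpv_inject(new_pkg)          # replaces any package in the same slot
#   fakedb.match("dev-lang/python")     # matches against the simulated state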
4497 class depgraph(object):
4498
4499         pkg_tree_map = RootConfig.pkg_tree_map
4500
4501         _dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
4502
4503         def __init__(self, settings, trees, myopts, myparams, spinner):
4504                 self.settings = settings
4505                 self.target_root = settings["ROOT"]
4506                 self.myopts = myopts
4507                 self.myparams = myparams
4508                 self.edebug = 0
4509                 if settings.get("PORTAGE_DEBUG", "") == "1":
4510                         self.edebug = 1
4511                 self.spinner = spinner
4512                 self._running_root = trees["/"]["root_config"]
4513                 self._opts_no_restart = Scheduler._opts_no_restart
4514                 self.pkgsettings = {}
4515                 # Maps slot atom to package for each Package added to the graph.
4516                 self._slot_pkg_map = {}
4517                 # Maps nodes to the reasons they were selected for reinstallation.
4518                 self._reinstall_nodes = {}
4519                 self.mydbapi = {}
4520                 self.trees = {}
4521                 self._trees_orig = trees
4522                 self.roots = {}
4523                 # Contains a filtered view of preferred packages that are selected
4524                 # from available repositories.
4525                 self._filtered_trees = {}
4526                 # Contains installed packages and new packages that have been added
4527                 # to the graph.
4528                 self._graph_trees = {}
4529                 # All Package instances
4530                 self._pkg_cache = {}
4531                 for myroot in trees:
4532                         self.trees[myroot] = {}
4533                         # Create a RootConfig instance that references
4534                         # the FakeVartree instead of the real one.
4535                         self.roots[myroot] = RootConfig(
4536                                 trees[myroot]["vartree"].settings,
4537                                 self.trees[myroot],
4538                                 trees[myroot]["root_config"].setconfig)
4539                         for tree in ("porttree", "bintree"):
4540                                 self.trees[myroot][tree] = trees[myroot][tree]
4541                         self.trees[myroot]["vartree"] = \
4542                                 FakeVartree(trees[myroot]["root_config"],
4543                                         pkg_cache=self._pkg_cache)
4544                         self.pkgsettings[myroot] = portage.config(
4545                                 clone=self.trees[myroot]["vartree"].settings)
4546                         self._slot_pkg_map[myroot] = {}
4547                         vardb = self.trees[myroot]["vartree"].dbapi
4548                         preload_installed_pkgs = "--nodeps" not in self.myopts and \
4549                                 "--buildpkgonly" not in self.myopts
4550                         # This fakedbapi instance will model the state that the vdb will
4551                         # have after new packages have been installed.
4552                         fakedb = PackageVirtualDbapi(vardb.settings)
4553                         if preload_installed_pkgs:
4554                                 for pkg in vardb:
4555                                         self.spinner.update()
4556                                         # This triggers metadata updates via FakeVartree.
4557                                         vardb.aux_get(pkg.cpv, [])
4558                                         fakedb.cpv_inject(pkg)
4559
4560                         # Now that the vardb state is cached in our FakeVartree,
4561                         # we won't be needing the real vartree cache for a while.
4562                         # To make some room on the heap, clear the vardbapi
4563                         # caches.
4564                         trees[myroot]["vartree"].dbapi._clear_cache()
4565                         gc.collect()
4566
4567                         self.mydbapi[myroot] = fakedb
4568                         def graph_tree():
4569                                 pass
4570                         graph_tree.dbapi = fakedb
4571                         self._graph_trees[myroot] = {}
4572                         self._filtered_trees[myroot] = {}
4573                         # Substitute the graph tree for the vartree in dep_check() since we
4574                         # want atom selections to be consistent with package selections
4575                         # that have already been made.
4576                         self._graph_trees[myroot]["porttree"]   = graph_tree
4577                         self._graph_trees[myroot]["vartree"]    = graph_tree
4578                         def filtered_tree():
4579                                 pass
4580                         filtered_tree.dbapi = self._dep_check_composite_db(self, myroot)
4581                         self._filtered_trees[myroot]["porttree"] = filtered_tree
4582
4583                         # Passing in graph_tree as the vartree here could lead to better
4584                         # atom selections in some cases by causing atoms for packages that
4585                         # have been added to the graph to be preferred over other choices.
4586                         # However, it can trigger atom selections that result in
4587                         # unresolvable direct circular dependencies. For example, this
4588                         # happens with gwydion-dylan which depends on either itself or
4589                         # gwydion-dylan-bin. In case gwydion-dylan is not yet installed,
4590                         # gwydion-dylan-bin needs to be selected in order to avoid an
4591                         # unresolvable direct circular dependency.
4592                         #
4593                         # To solve the problem described above, pass in "graph_db" so that
4594                         # packages that have been added to the graph are distinguishable
4595                         # from other available packages and installed packages. Also, pass
4596                         # the parent package into self._select_atoms() calls so that
4597                         # unresolvable direct circular dependencies can be detected and
4598                         # avoided when possible.
4599                         self._filtered_trees[myroot]["graph_db"] = graph_tree.dbapi
4600                         self._filtered_trees[myroot]["vartree"] = self.trees[myroot]["vartree"]
4601
4602                         dbs = []
4603                         portdb = self.trees[myroot]["porttree"].dbapi
4604                         bindb  = self.trees[myroot]["bintree"].dbapi
4605                         vardb  = self.trees[myroot]["vartree"].dbapi
4606                         #               (db, pkg_type, built, installed, db_keys)
4607                         if "--usepkgonly" not in self.myopts:
4608                                 db_keys = list(portdb._aux_cache_keys)
4609                                 dbs.append((portdb, "ebuild", False, False, db_keys))
4610                         if "--usepkg" in self.myopts:
4611                                 db_keys = list(bindb._aux_cache_keys)
4612                                 dbs.append((bindb,  "binary", True, False, db_keys))
4613                         db_keys = list(trees[myroot]["vartree"].dbapi._aux_cache_keys)
4614                         dbs.append((vardb, "installed", True, True, db_keys))
4615                         self._filtered_trees[myroot]["dbs"] = dbs
4616                         if "--usepkg" in self.myopts:
4617                                 self.trees[myroot]["bintree"].populate(
4618                                         "--getbinpkg" in self.myopts,
4619                                         "--getbinpkgonly" in self.myopts)
4620                 del trees
4621
4622                 self.digraph=portage.digraph()
4623                 # contains all sets added to the graph
4624                 self._sets = {}
4625                 # contains atoms given as arguments
4626                 self._sets["args"] = InternalPackageSet()
4627                 # contains all atoms from all sets added to the graph, including
4628                 # atoms given as arguments
4629                 self._set_atoms = InternalPackageSet()
4630                 self._atom_arg_map = {}
4631                 # contains all nodes pulled in by self._set_atoms
4632                 self._set_nodes = set()
4633                 # Contains only Blocker -> Uninstall edges
4634                 self._blocker_uninstalls = digraph()
4635                 # Contains only Package -> Blocker edges
4636                 self._blocker_parents = digraph()
4637                 # Contains only irrelevant Package -> Blocker edges
4638                 self._irrelevant_blockers = digraph()
4639                 # Contains only unsolvable Package -> Blocker edges
4640                 self._unsolvable_blockers = digraph()
4641                 # Contains all Blocker -> Blocked Package edges
4642                 self._blocked_pkgs = digraph()
4643                 # Contains world packages that have been protected from
4644                 # uninstallation but may not have been added to the graph
4645                 # if the graph is not complete yet.
4646                 self._blocked_world_pkgs = {}
4647                 self._slot_collision_info = {}
4648                 # Slot collision nodes are not allowed to block other packages since
4649                 # blocker validation is only able to account for one package per slot.
4650                 self._slot_collision_nodes = set()
4651                 self._parent_atoms = {}
4652                 self._slot_conflict_parent_atoms = set()
4653                 self._serialized_tasks_cache = None
4654                 self._scheduler_graph = None
4655                 self._displayed_list = None
4656                 self._pprovided_args = []
4657                 self._missing_args = []
4658                 self._masked_installed = set()
4659                 self._unsatisfied_deps_for_display = []
4660                 self._unsatisfied_blockers_for_display = None
4661                 self._circular_deps_for_display = None
4662                 self._dep_stack = []
4663                 self._unsatisfied_deps = []
4664                 self._initially_unsatisfied_deps = []
4665                 self._ignored_deps = []
4666                 self._required_set_names = set(["system", "world"])
4667                 self._select_atoms = self._select_atoms_highest_available
4668                 self._select_package = self._select_pkg_highest_available
4669                 self._highest_pkg_cache = {}
4670
4671         def _show_slot_collision_notice(self):
4672                 """Show an informational message advising the user to mask one of
4673                 the packages. In some cases it may be possible to resolve this
4674                 automatically, but support for backtracking (removal of nodes that have
4675                 already been selected) will be required in order to handle all possible
4676                 cases.
4677                 """
4678
4679                 if not self._slot_collision_info:
4680                         return
4681
4682                 self._show_merge_list()
4683
4684                 msg = []
4685                 msg.append("\n!!! Multiple package instances within a single " + \
4686                         "package slot have been pulled\n")
4687                 msg.append("!!! into the dependency graph, resulting" + \
4688                         " in a slot conflict:\n\n")
4689                 indent = "  "
4690                 # Max number of parents shown, to avoid flooding the display.
4691                 max_parents = 3
4692                 explanation_columns = 70
4693                 explanations = 0
4694                 for (slot_atom, root), slot_nodes \
4695                         in self._slot_collision_info.iteritems():
4696                         msg.append(str(slot_atom))
4697                         msg.append("\n\n")
4698
4699                         for node in slot_nodes:
4700                                 msg.append(indent)
4701                                 msg.append(str(node))
4702                                 parent_atoms = self._parent_atoms.get(node)
4703                                 if parent_atoms:
4704                                         pruned_list = set()
4705                                         # Prefer conflict atoms over others.
4706                                         for parent_atom in parent_atoms:
4707                                                 if len(pruned_list) >= max_parents:
4708                                                         break
4709                                                 if parent_atom in self._slot_conflict_parent_atoms:
4710                                                         pruned_list.add(parent_atom)
4711
4712                                         # If this package was pulled in by conflict atoms then
4713                                         # show those alone since those are the most interesting.
4714                                         if not pruned_list:
4715                                                 # When generating the pruned list, prefer instances
4716                                                 # of DependencyArg over instances of Package.
4717                                                 for parent_atom in parent_atoms:
4718                                                         if len(pruned_list) >= max_parents:
4719                                                                 break
4720                                                         parent, atom = parent_atom
4721                                                         if isinstance(parent, DependencyArg):
4722                                                                 pruned_list.add(parent_atom)
4723                                 # Prefer Package instances that themselves have been
4724                                                 # pulled into collision slots.
4725                                                 for parent_atom in parent_atoms:
4726                                                         if len(pruned_list) >= max_parents:
4727                                                                 break
4728                                                         parent, atom = parent_atom
4729                                                         if isinstance(parent, Package) and \
4730                                                                 (parent.slot_atom, parent.root) \
4731                                                                 in self._slot_collision_info:
4732                                                                 pruned_list.add(parent_atom)
4733                                                 for parent_atom in parent_atoms:
4734                                                         if len(pruned_list) >= max_parents:
4735                                                                 break
4736                                                         pruned_list.add(parent_atom)
4737                                         omitted_parents = len(parent_atoms) - len(pruned_list)
4738                                         parent_atoms = pruned_list
4739                                         msg.append(" pulled in by\n")
4740                                         for parent_atom in parent_atoms:
4741                                                 parent, atom = parent_atom
4742                                                 msg.append(2*indent)
4743                                                 if isinstance(parent,
4744                                                         (PackageArg, AtomArg)):
4745                                                         # For PackageArg and AtomArg types, it's
4746                                                         # redundant to display the atom attribute.
4747                                                         msg.append(str(parent))
4748                                                 else:
4749                                                         # Display the specific atom from SetArg or
4750                                                         # Package types.
4751                                                         msg.append("%s required by %s" % (atom, parent))
4752                                                 msg.append("\n")
4753                                         if omitted_parents:
4754                                                 msg.append(2*indent)
4755                                                 msg.append("(and %d more)\n" % omitted_parents)
4756                                 else:
4757                                         msg.append(" (no parents)\n")
4758                                 msg.append("\n")
4759                         explanation = self._slot_conflict_explanation(slot_nodes)
4760                         if explanation:
4761                                 explanations += 1
4762                                 msg.append(indent + "Explanation:\n\n")
4763                                 for line in textwrap.wrap(explanation, explanation_columns):
4764                                         msg.append(2*indent + line + "\n")
4765                                 msg.append("\n")
4766                 msg.append("\n")
4767                 sys.stderr.write("".join(msg))
4768                 sys.stderr.flush()
4769
4770                 explanations_for_all = explanations == len(self._slot_collision_info)
4771
4772                 if explanations_for_all or "--quiet" in self.myopts:
4773                         return
4774
4775                 msg = []
4776                 msg.append("It may be possible to solve this problem ")
4777                 msg.append("by using package.mask to prevent one of ")
4778                 msg.append("those packages from being selected. ")
4779                 msg.append("However, it is also possible that conflicting ")
4780                 msg.append("dependencies exist such that they are impossible to ")
4781                 msg.append("satisfy simultaneously.  If such a conflict exists in ")
4782                 msg.append("the dependencies of two different packages, then those ")
4783                 msg.append("packages cannot be installed simultaneously.")
4784
4785                 from formatter import AbstractFormatter, DumbWriter
4786                 f = AbstractFormatter(DumbWriter(sys.stderr, maxcol=72))
4787                 for x in msg:
4788                         f.add_flowing_data(x)
4789                 f.end_paragraph(1)
4790
4791                 msg = []
4792                 msg.append("For more information, see the MASKED PACKAGES ")
4793                 msg.append("section in the emerge man page or refer ")
4794                 msg.append("to the Gentoo Handbook.")
4795                 for x in msg:
4796                         f.add_flowing_data(x)
4797                 f.end_paragraph(1)
4798                 f.writer.flush()
4799
4800         def _slot_conflict_explanation(self, slot_nodes):
4801                 """
4802                 When a slot conflict occurs due to USE deps, there are a few
4803                 different cases to consider:
4804
4805                 1) New USE are correctly set but --newuse wasn't requested so an
4806                    installed package with incorrect USE happened to get pulled
4807                    into the graph before the new one.
4808
4809                 2) New USE are incorrectly set but an installed package has correct
4810                    USE so it got pulled into the graph, and a new instance also got
4811                    pulled in due to --newuse or an upgrade.
4812
4813                 3) Multiple USE deps exist that can't be satisfied simultaneously,
4814                    and multiple package instances got pulled into the same slot to
4815                    satisfy the conflicting deps.
4816
4817                 Currently, explanations and suggested courses of action are generated
4818                 for cases 1 and 2. Case 3 is too complex to give a useful suggestion.
4819                 """
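                     # Illustrative sketch (hypothetical names): for case 2, with
                     # matched_node.slot_atom == "dev-libs/foo:0" and a single conflict
                     # atom "dev-libs/foo[ssl]", the return value would read roughly:
                     #
                     #   "New USE for 'dev-libs/foo:0' are incorrectly set. In order to
                     #    solve this, adjust USE to satisfy 'dev-libs/foo[ssl]'."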
4820
4821                 if len(slot_nodes) != 2:
4822                         # Suggestions are only implemented for
4823                         # conflicts between two packages.
4824                         return None
4825
4826                 all_conflict_atoms = self._slot_conflict_parent_atoms
4827                 matched_node = None
4828                 matched_atoms = None
4829                 unmatched_node = None
4830                 for node in slot_nodes:
4831                         parent_atoms = self._parent_atoms.get(node)
4832                         if not parent_atoms:
4833                                 # Normally, there are always parent atoms. If there are
4834                                 # none then something unexpected is happening and there's
4835                                 # currently no suggestion for this case.
4836                                 return None
4837                         conflict_atoms = all_conflict_atoms.intersection(parent_atoms)
4838                         for parent_atom in conflict_atoms:
4839                                 parent, atom = parent_atom
4840                                 if not atom.use:
4841                                         # Suggestions are currently only implemented for cases
4842                                         # in which all conflict atoms have USE deps.
4843                                         return None
4844                         if conflict_atoms:
4845                                 if matched_node is not None:
4846                                         # If conflict atoms match multiple nodes
4847                                         # then there's no suggestion.
4848                                         return None
4849                                 matched_node = node
4850                                 matched_atoms = conflict_atoms
4851                         else:
4852                                 if unmatched_node is not None:
4853                                         # Neither node is matched by conflict atoms, and
4854                                         # there is no suggestion for this case.
4855                                         return None
4856                                 unmatched_node = node
4857
4858                 if matched_node is None or unmatched_node is None:
4859                         # This shouldn't happen.
4860                         return None
4861
4862                 if unmatched_node.installed and not matched_node.installed:
4863                         return "New USE are correctly set, but --newuse wasn't" + \
4864                                 " requested, so an installed package with incorrect USE " + \
4865                                 "happened to get pulled into the dependency graph. " + \
4866                                 "In order to solve " + \
4867                                 "this, either specify the --newuse option or explicitly " + \
4868                                 "reinstall '%s'." % matched_node.slot_atom
4869
4870                 if matched_node.installed and not unmatched_node.installed:
4871                         atoms = sorted(set(atom for parent, atom in matched_atoms))
4872                         explanation = ("New USE for '%s' are incorrectly set. " + \
4873                                 "In order to solve this, adjust USE to satisfy '%s'") % \
4874                                 (matched_node.slot_atom, atoms[0])
4875                         if len(atoms) > 1:
4876                                 for atom in atoms[1:-1]:
4877                                         explanation += ", '%s'" % (atom,)
4878                                 if len(atoms) > 2:
4879                                         explanation += ","
4880                                 explanation += " and '%s'" % (atoms[-1],)
4881                         explanation += "."
4882                         return explanation
4883
4884                 return None
4885
4886         def _process_slot_conflicts(self):
4887                 """
4888                 Process slot conflict data to identify specific atoms which
4889                 lead to conflict. These atoms only match a subset of the
4890                 packages that have been pulled into a given slot.
4891                 """
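                     # Rough sketch of the effect, with hypothetical data: if slot
                     # "dev-libs/foo:0" holds both foo-1.0 and foo-2.0 and a parent atom
                     # ">=dev-libs/foo-2.0" matches only foo-2.0, then for foo-1.0 that
                     # (parent, atom) pair fails findAtomForPackage() and is recorded in
                     # self._slot_conflict_parent_atoms as an atom that explains the
                     # conflict.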
4892                 for (slot_atom, root), slot_nodes \
4893                         in self._slot_collision_info.iteritems():
4894
4895                         all_parent_atoms = set()
4896                         for pkg in slot_nodes:
4897                                 parent_atoms = self._parent_atoms.get(pkg)
4898                                 if not parent_atoms:
4899                                         continue
4900                                 all_parent_atoms.update(parent_atoms)
4901
4902                         for pkg in slot_nodes:
4903                                 parent_atoms = self._parent_atoms.get(pkg)
4904                                 if parent_atoms is None:
4905                                         parent_atoms = set()
4906                                         self._parent_atoms[pkg] = parent_atoms
4907                                 for parent_atom in all_parent_atoms:
4908                                         if parent_atom in parent_atoms:
4909                                                 continue
4910                                         # Use package set for matching since it will match via
4911                                         # PROVIDE when necessary, while match_from_list does not.
4912                                         parent, atom = parent_atom
4913                                         atom_set = InternalPackageSet(
4914                                                 initial_atoms=(atom,))
4915                                         if atom_set.findAtomForPackage(pkg):
4916                                                 parent_atoms.add(parent_atom)
4917                                         else:
4918                                                 self._slot_conflict_parent_atoms.add(parent_atom)
4919
4920         def _reinstall_for_flags(self, forced_flags,
4921                 orig_use, orig_iuse, cur_use, cur_iuse):
4922                 """Return a set of flags that trigger reinstallation, or None if there
4923                 are no such flags."""
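                     # Worked example under --newuse (hypothetical flag names): with
                     # forced_flags=set(), orig_iuse={"ssl","gtk"}, cur_iuse={"ssl","qt4"},
                     # orig_use={"ssl"} and cur_use={"ssl","qt4"}, the IUSE symmetric
                     # difference gives {"gtk","qt4"} and the enabled-flag comparison adds
                     # "qt4", so {"gtk","qt4"} is returned and the package is reinstalled.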
4924                 if "--newuse" in self.myopts:
4925                         flags = set(orig_iuse.symmetric_difference(
4926                                 cur_iuse).difference(forced_flags))
4927                         flags.update(orig_iuse.intersection(orig_use).symmetric_difference(
4928                                 cur_iuse.intersection(cur_use)))
4929                         if flags:
4930                                 return flags
4931                 elif "changed-use" == self.myopts.get("--reinstall"):
4932                         flags = orig_iuse.intersection(orig_use).symmetric_difference(
4933                                 cur_iuse.intersection(cur_use))
4934                         if flags:
4935                                 return flags
4936                 return None
4937
4938         def _create_graph(self, allow_unsatisfied=False):
4939                 dep_stack = self._dep_stack
4940                 while dep_stack:
4941                         self.spinner.update()
4942                         dep = dep_stack.pop()
4943                         if isinstance(dep, Package):
4944                                 if not self._add_pkg_deps(dep,
4945                                         allow_unsatisfied=allow_unsatisfied):
4946                                         return 0
4947                                 continue
4948                         if not self._add_dep(dep, allow_unsatisfied=allow_unsatisfied):
4949                                 return 0
4950                 return 1
4951
4952         def _add_dep(self, dep, allow_unsatisfied=False):
4953                 debug = "--debug" in self.myopts
4954                 buildpkgonly = "--buildpkgonly" in self.myopts
4955                 nodeps = "--nodeps" in self.myopts
4956                 empty = "empty" in self.myparams
4957                 deep = "deep" in self.myparams
4958                 update = "--update" in self.myopts and dep.depth <= 1
4959                 if dep.blocker:
4960                         if not buildpkgonly and \
4961                                 not nodeps and \
4962                                 dep.parent not in self._slot_collision_nodes:
4963                                 if dep.parent.onlydeps:
4964                                         # It's safe to ignore blockers if the
4965                                         # parent is an --onlydeps node.
4966                                         return 1
4967                                 # The blocker applies to the root where
4968                                 # the parent is or will be installed.
4969                                 blocker = Blocker(atom=dep.atom,
4970                                         eapi=dep.parent.metadata["EAPI"],
4971                                         root=dep.parent.root)
4972                                 self._blocker_parents.add(blocker, dep.parent)
4973                         return 1
4974                 dep_pkg, existing_node = self._select_package(dep.root, dep.atom,
4975                         onlydeps=dep.onlydeps)
4976                 if not dep_pkg:
4977                         if dep.priority.optional:
4978                                 # This could be an unnecessary build-time dep
4979                                 # pulled in by --with-bdeps=y.
4980                                 return 1
4981                         if allow_unsatisfied:
4982                                 self._unsatisfied_deps.append(dep)
4983                                 return 1
4984                         self._unsatisfied_deps_for_display.append(
4985                                 ((dep.root, dep.atom), {"myparent":dep.parent}))
4986                         return 0
4987                 # In some cases, dep_check will return deps that shouldn't
4988                 # be processed any further, so they are identified and
4989                 # discarded here. Try to discard as few as possible since
4990                 # discarded dependencies reduce the amount of information
4991                 # available for optimization of merge order.
4992                 if dep.priority.satisfied and \
4993                         not dep_pkg.installed and \
4994                         not (existing_node or empty or deep or update):
4995                         myarg = None
4996                         if dep.root == self.target_root:
4997                                 try:
4998                                         myarg = self._iter_atoms_for_pkg(dep_pkg).next()
4999                                 except StopIteration:
5000                                         pass
5001                                 except portage.exception.InvalidDependString:
5002                                         if not dep_pkg.installed:
5003                                                 # This shouldn't happen since the package
5004                                                 # should have been masked.
5005                                                 raise
5006                         if not myarg:
5007                                 self._ignored_deps.append(dep)
5008                                 return 1
5009
5010                 if not self._add_pkg(dep_pkg, dep):
5011                         return 0
5012                 return 1
5013
5014         def _add_pkg(self, pkg, dep):
5015                 myparent = None
5016                 priority = None
5017                 depth = 0
5018                 if dep is None:
5019                         dep = Dependency()
5020                 else:
5021                         myparent = dep.parent
5022                         priority = dep.priority
5023                         depth = dep.depth
5024                 if priority is None:
5025                         priority = DepPriority()
5026                 """
5027                 Fills the digraph with nodes comprised of packages to merge.
5028                 mybigkey is the package spec of the package to merge.
5029                 myparent is the package depending on mybigkey (or None)
5030                 addme = Should we add this package to the digraph or are we just looking at its deps?
5031                         Think --onlydeps, we need to ignore packages in that case.
5032                 #stuff to add:
5033                 #SLOT-aware emerge
5034                 #IUSE-aware emerge -> USE DEP aware depgraph
5035                 #"no downgrade" emerge
5036                 """
5037                 # Ensure that the dependencies of the same package
5038                 # are never processed more than once.
5039                 previously_added = pkg in self.digraph
5040
5041                 # select the correct /var database that we'll be checking against
5042                 vardbapi = self.trees[pkg.root]["vartree"].dbapi
5043                 pkgsettings = self.pkgsettings[pkg.root]
5044
5045                 arg_atoms = None
5046                 if True:
5047                         try:
5048                                 arg_atoms = list(self._iter_atoms_for_pkg(pkg))
5049                         except portage.exception.InvalidDependString, e:
5050                                 if not pkg.installed:
5051                                         show_invalid_depstring_notice(
5052                                                 pkg, pkg.metadata["PROVIDE"], str(e))
5053                                         return 0
5054                                 del e
5055
5056                 if not pkg.onlydeps:
5057                         if not pkg.installed and \
5058                                 "empty" not in self.myparams and \
5059                                 vardbapi.match(pkg.slot_atom):
5060                                 # Increase the priority of dependencies on packages that
5061                                 # are being rebuilt. This optimizes merge order so that
5062                                 # dependencies are rebuilt/updated as soon as possible,
5063                                 # which is needed especially when emerge is called by
5064                                 # revdep-rebuild since dependencies may be affected by ABI
5065                                 # breakage that has rendered them useless. Don't adjust
5066                                 # priority here when in "empty" mode since all packages
5067                                 # are being merged in that case.
5068                                 priority.rebuild = True
5069
5070                         existing_node = self._slot_pkg_map[pkg.root].get(pkg.slot_atom)
5071                         slot_collision = False
5072                         if existing_node:
5073                                 existing_node_matches = pkg.cpv == existing_node.cpv
5074                                 if existing_node_matches and \
5075                                         pkg != existing_node and \
5076                                         dep.atom is not None:
5077                                         # Use package set for matching since it will match via
5078                                         # PROVIDE when necessary, while match_from_list does not.
5079                                         atom_set = InternalPackageSet(initial_atoms=[dep.atom])
5080                                         if not atom_set.findAtomForPackage(existing_node):
5081                                                 existing_node_matches = False
5082                                 if existing_node_matches:
5083                                         # The existing node can be reused.
5084                                         if arg_atoms:
5085                                                 for parent_atom in arg_atoms:
5086                                                         parent, atom = parent_atom
5087                                                         self.digraph.add(existing_node, parent,
5088                                                                 priority=priority)
5089                                                         self._add_parent_atom(existing_node, parent_atom)
5090                                         # If a direct circular dependency is not an unsatisfied
5091                                         # buildtime dependency then drop it here since otherwise
5092                                         # it can skew the merge order calculation in an unwanted
5093                                         # way.
5094                                         if existing_node != myparent or \
5095                                                 (priority.buildtime and not priority.satisfied):
5096                                                 self.digraph.addnode(existing_node, myparent,
5097                                                         priority=priority)
5098                                                 if dep.atom is not None and dep.parent is not None:
5099                                                         self._add_parent_atom(existing_node,
5100                                                                 (dep.parent, dep.atom))
5101                                         return 1
5102                                 else:
5103
5104                                         # A slot collision has occurred.  Sometimes this coincides
5105                                         # with unresolvable blockers, so the slot collision will be
5106                                         # shown later if there are no unresolvable blockers.
5107                                         self._add_slot_conflict(pkg)
5108                                         slot_collision = True
5109
5110                         if slot_collision:
5111                                 # Now add this node to the graph so that self.display()
5112                                 # can show use flags and --tree output.  This node is
5113                                 # only being partially added to the graph.  It must not be
5114                                 # allowed to interfere with the other nodes that have been
5115                                 # added.  Do not overwrite data for existing nodes in
5116                                 # self.mydbapi since that data will be used for blocker
5117                                 # validation.
5118                                 # Even though the graph is now invalid, continue to process
5119                                 # dependencies so that things like --fetchonly can still
5120                                 # function despite collisions.
5121                                 pass
5122                         elif not previously_added:
5123                                 self._slot_pkg_map[pkg.root][pkg.slot_atom] = pkg
5124                                 self.mydbapi[pkg.root].cpv_inject(pkg)
5125                                 self._filtered_trees[pkg.root]["porttree"].dbapi._clear_cache()
5126
5127                         if not pkg.installed:
5128                                 # Allow this package to satisfy old-style virtuals in case it
5129                                 # doesn't already. Any pre-existing providers will be preferred
5130                                 # over this one.
5131                                 try:
5132                                         pkgsettings.setinst(pkg.cpv, pkg.metadata)
5133                                         # For consistency, also update the global virtuals.
5134                                         settings = self.roots[pkg.root].settings
5135                                         settings.unlock()
5136                                         settings.setinst(pkg.cpv, pkg.metadata)
5137                                         settings.lock()
5138                                 except portage.exception.InvalidDependString, e:
5139                                         show_invalid_depstring_notice(
5140                                                 pkg, pkg.metadata["PROVIDE"], str(e))
5141                                         del e
5142                                         return 0
5143
5144                 if arg_atoms:
5145                         self._set_nodes.add(pkg)
5146
5147                 # Do this even for --onlydeps (when the package itself is not merged) so that the
5148                 # parent/child relationship is always known in case
5149                 # self._show_slot_collision_notice() needs to be called later.
5150                 self.digraph.add(pkg, myparent, priority=priority)
5151                 if dep.atom is not None and dep.parent is not None:
5152                         self._add_parent_atom(pkg, (dep.parent, dep.atom))
5153
5154                 if arg_atoms:
5155                         for parent_atom in arg_atoms:
5156                                 parent, atom = parent_atom
5157                                 self.digraph.add(pkg, parent, priority=priority)
5158                                 self._add_parent_atom(pkg, parent_atom)
5159
5160                 """ This section determines whether we go deeper into dependencies or not.
5161                     We want to go deeper on a few occasions:
5162                     Installing package A, we need to make sure package A's deps are met.
5163                     emerge --deep <pkgspec>; we need to recursively check dependencies of pkgspec
5164                     If we are in --nodeps (no recursion) mode, we obviously only check 1 level of dependencies.
5165                 """
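                     # Summarized behaviour (descriptive only, no extra logic):
                     #   "recurse" not in myparams (e.g. --nodeps)  -> stop here
                     #   pkg.installed and "deep" not in myparams   -> queue on self._ignored_deps
                     #   otherwise                                  -> queue on self._dep_stack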
5166                 dep_stack = self._dep_stack
5167                 if "recurse" not in self.myparams:
5168                         return 1
5169                 elif pkg.installed and \
5170                         "deep" not in self.myparams:
5171                         dep_stack = self._ignored_deps
5172
5173                 self.spinner.update()
5174
5175                 if arg_atoms:
5176                         depth = 0
5177                 pkg.depth = depth
5178                 if not previously_added:
5179                         dep_stack.append(pkg)
5180                 return 1
5181
5182         def _add_parent_atom(self, pkg, parent_atom):
5183                 parent_atoms = self._parent_atoms.get(pkg)
5184                 if parent_atoms is None:
5185                         parent_atoms = set()
5186                         self._parent_atoms[pkg] = parent_atoms
5187                 parent_atoms.add(parent_atom)
5188
5189         def _add_slot_conflict(self, pkg):
5190                 self._slot_collision_nodes.add(pkg)
5191                 slot_key = (pkg.slot_atom, pkg.root)
5192                 slot_nodes = self._slot_collision_info.get(slot_key)
5193                 if slot_nodes is None:
5194                         slot_nodes = set()
5195                         slot_nodes.add(self._slot_pkg_map[pkg.root][pkg.slot_atom])
5196                         self._slot_collision_info[slot_key] = slot_nodes
5197                 slot_nodes.add(pkg)
5198
5199         def _add_pkg_deps(self, pkg, allow_unsatisfied=False):
5200
5201                 mytype = pkg.type_name
5202                 myroot = pkg.root
5203                 mykey = pkg.cpv
5204                 metadata = pkg.metadata
5205                 myuse = pkg.use.enabled
5206                 jbigkey = pkg
5207                 depth = pkg.depth + 1
5208                 removal_action = "remove" in self.myparams
5209
5210                 edepend={}
5211                 depkeys = ["DEPEND","RDEPEND","PDEPEND"]
5212                 for k in depkeys:
5213                         edepend[k] = metadata[k]
5214
5215                 if not pkg.built and \
5216                         "--buildpkgonly" in self.myopts and \
5217                         "deep" not in self.myparams and \
5218                         "empty" not in self.myparams:
5219                         edepend["RDEPEND"] = ""
5220                         edepend["PDEPEND"] = ""
5221                 bdeps_optional = False
5222
5223                 if pkg.built and not removal_action:
5224                         if self.myopts.get("--with-bdeps", "n") == "y":
5225                                 # Pull in build time deps as requested, but mark them as
5226                                 # "optional" since they are not strictly required. This allows
5227                                 # more freedom in the merge order calculation for solving
5228                                 # circular dependencies. Don't convert to PDEPEND since that
5229                                 # could make --with-bdeps=y less effective if it is used to
5230                                 # adjust merge order to prevent built_with_use() calls from
5231                                 # failing.
5232                                 bdeps_optional = True
5233                         else:
5234                                 # built packages do not have build time dependencies.
5235                                 edepend["DEPEND"] = ""
5236
5237                 if removal_action and self.myopts.get("--with-bdeps", "y") == "n":
5238                         edepend["DEPEND"] = ""
5239
5240                 deps = (
5241                         ("/", edepend["DEPEND"],
5242                                 self._priority(buildtime=(not bdeps_optional),
5243                                 optional=bdeps_optional)),
5244                         (myroot, edepend["RDEPEND"], self._priority(runtime=True)),
5245                         (myroot, edepend["PDEPEND"], self._priority(runtime_post=True))
5246                 )
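                     # Descriptive note on the tuple above: build-time deps (DEPEND) are
                     # resolved against the build root "/", while run-time (RDEPEND) and
                     # post-merge (PDEPEND) deps are resolved against the package's own
                     # root.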
5247
5248                 debug = "--debug" in self.myopts
5249                 strict = mytype != "installed"
5250                 try:
5251                         for dep_root, dep_string, dep_priority in deps:
5252                                 if not dep_string:
5253                                         continue
5254                                 if debug:
5255                                         print
5256                                         print "Parent:   ", jbigkey
5257                                         print "Depstring:", dep_string
5258                                         print "Priority:", dep_priority
5259                                 vardb = self.roots[dep_root].trees["vartree"].dbapi
5260                                 try:
5261                                         selected_atoms = self._select_atoms(dep_root,
5262                                                 dep_string, myuse=myuse, parent=pkg, strict=strict,
5263                                                 priority=dep_priority)
5264                                 except portage.exception.InvalidDependString, e:
5265                                         show_invalid_depstring_notice(jbigkey, dep_string, str(e))
5266                                         return 0
5267                                 if debug:
5268                                         print "Candidates:", selected_atoms
5269
5270                                 for atom in selected_atoms:
5271                                         try:
5272
5273                                                 atom = portage.dep.Atom(atom)
5274
5275                                                 mypriority = dep_priority.copy()
5276                                                 if not atom.blocker and vardb.match(atom):
5277                                                         mypriority.satisfied = True
5278
5279                                                 if not self._add_dep(Dependency(atom=atom,
5280                                                         blocker=atom.blocker, depth=depth, parent=pkg,
5281                                                         priority=mypriority, root=dep_root),
5282                                                         allow_unsatisfied=allow_unsatisfied):
5283                                                         return 0
5284
5285                                         except portage.exception.InvalidAtom, e:
5286                                                 show_invalid_depstring_notice(
5287                                                         pkg, dep_string, str(e))
5288                                                 del e
5289                                                 if not pkg.installed:
5290                                                         return 0
5291
5292                                 if debug:
5293                                         print "Exiting...", jbigkey
5294                 except portage.exception.AmbiguousPackageName, e:
5295                         pkgs = e.args[0]
5296                         portage.writemsg("\n\n!!! An atom in the dependencies " + \
5297                                 "is not fully-qualified. Multiple matches:\n\n", noiselevel=-1)
5298                         for cpv in pkgs:
5299                                 portage.writemsg("    %s\n" % cpv, noiselevel=-1)
5300                         portage.writemsg("\n", noiselevel=-1)
5301                         if mytype == "binary":
5302                                 portage.writemsg(
5303                                         "!!! This binary package cannot be installed: '%s'\n" % \
5304                                         mykey, noiselevel=-1)
5305                         elif mytype == "ebuild":
5306                                 portdb = self.roots[myroot].trees["porttree"].dbapi
5307                                 myebuild, mylocation = portdb.findname2(mykey)
5308                                 portage.writemsg("!!! This ebuild cannot be installed: " + \
5309                                         "'%s'\n" % myebuild, noiselevel=-1)
5310                         portage.writemsg("!!! Please notify the package maintainer " + \
5311                                 "that atoms must be fully-qualified.\n", noiselevel=-1)
5312                         return 0
5313                 return 1
5314
5315         def _priority(self, **kwargs):
5316                 if "remove" in self.myparams:
5317                         priority_constructor = UnmergeDepPriority
5318                 else:
5319                         priority_constructor = DepPriority
5320                 return priority_constructor(**kwargs)
5321
5322         def _dep_expand(self, root_config, atom_without_category):
5323                 """
5324                 @param root_config: a root config instance
5325                 @type root_config: RootConfig
5326                 @param atom_without_category: an atom without a category component
5327                 @type atom_without_category: String
5328                 @rtype: list
5329                 @returns: a list of atoms containing categories (possibly empty)
5330                 """
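                     # Hypothetical usage sketch: _dep_expand(root_config, "foo") could
                     # return ["app-misc/foo", "dev-libs/foo"] when both categories
                     # provide a package named "foo" in any configured database, or []
                     # when none do.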
5331                 null_cp = portage.dep_getkey(insert_category_into_atom(
5332                         atom_without_category, "null"))
5333                 cat, atom_pn = portage.catsplit(null_cp)
5334
5335                 dbs = self._filtered_trees[root_config.root]["dbs"]
5336                 categories = set()
5337                 for db, pkg_type, built, installed, db_keys in dbs:
5338                         for cat in db.categories:
5339                                 if db.cp_list("%s/%s" % (cat, atom_pn)):
5340                                         categories.add(cat)
5341
5342                 deps = []
5343                 for cat in categories:
5344                         deps.append(insert_category_into_atom(
5345                                 atom_without_category, cat))
5346                 return deps
5347
5348         def _have_new_virt(self, root, atom_cp):
5349                 ret = False
5350                 for db, pkg_type, built, installed, db_keys in \
5351                         self._filtered_trees[root]["dbs"]:
5352                         if db.cp_list(atom_cp):
5353                                 ret = True
5354                                 break
5355                 return ret
5356
5357         def _iter_atoms_for_pkg(self, pkg):
5358                 # TODO: add multiple $ROOT support
5359                 if pkg.root != self.target_root:
5360                         return
5361                 atom_arg_map = self._atom_arg_map
5362                 root_config = self.roots[pkg.root]
5363                 for atom in self._set_atoms.iterAtomsForPackage(pkg):
5364                         atom_cp = portage.dep_getkey(atom)
5365                         if atom_cp != pkg.cp and \
5366                                 self._have_new_virt(pkg.root, atom_cp):
5367                                 continue
5368                         visible_pkgs = root_config.visible_pkgs.match_pkgs(atom)
5369                         visible_pkgs.reverse() # descending order
5370                         higher_slot = None
5371                         for visible_pkg in visible_pkgs:
5372                                 if visible_pkg.cp != atom_cp:
5373                                         continue
5374                                 if pkg >= visible_pkg:
5375                                         # This is descending order, and we're not
5376                                         # interested in any versions <= the given pkg.
5377                                         break
5378                                 if pkg.slot_atom != visible_pkg.slot_atom:
5379                                         higher_slot = visible_pkg
5380                                         break
5381                         if higher_slot is not None:
5382                                 continue
5383                         for arg in atom_arg_map[(atom, pkg.root)]:
5384                                 if isinstance(arg, PackageArg) and \
5385                                         arg.package != pkg:
5386                                         continue
5387                                 yield arg, atom
5388
5389         def select_files(self, myfiles):
5390                 """Given a list of .tbz2s, .ebuilds, sets, and deps, create the
5391                 appropriate depgraph and return a favorite list."""
5392                 debug = "--debug" in self.myopts
5393                 root_config = self.roots[self.target_root]
5394                 sets = root_config.sets
5395                 getSetAtoms = root_config.setconfig.getSetAtoms
5396                 myfavorites=[]
5397                 myroot = self.target_root
5398                 dbs = self._filtered_trees[myroot]["dbs"]
5399                 vardb = self.trees[myroot]["vartree"].dbapi
5400                 real_vardb = self._trees_orig[myroot]["vartree"].dbapi
5401                 portdb = self.trees[myroot]["porttree"].dbapi
5402                 bindb = self.trees[myroot]["bintree"].dbapi
5403                 pkgsettings = self.pkgsettings[myroot]
5404                 args = []
5405                 onlydeps = "--onlydeps" in self.myopts
5406                 lookup_owners = []
5407                 for x in myfiles:
5408                         ext = os.path.splitext(x)[1]
5409                         if ext==".tbz2":
5410                                 if not os.path.exists(x):
5411                                         if os.path.exists(
5412                                                 os.path.join(pkgsettings["PKGDIR"], "All", x)):
5413                                                 x = os.path.join(pkgsettings["PKGDIR"], "All", x)
5414                                         elif os.path.exists(
5415                                                 os.path.join(pkgsettings["PKGDIR"], x)):
5416                                                 x = os.path.join(pkgsettings["PKGDIR"], x)
5417                                         else:
5418                                                 print "\n\n!!! Binary package '"+str(x)+"' does not exist."
5419                                                 print "!!! Please ensure the tbz2 exists as specified.\n"
5420                                                 return 0, myfavorites
5421                                 mytbz2=portage.xpak.tbz2(x)
5422                                 mykey=mytbz2.getelements("CATEGORY")[0]+"/"+os.path.splitext(os.path.basename(x))[0]
5423                                 if os.path.realpath(x) != \
5424                                         os.path.realpath(self.trees[myroot]["bintree"].getname(mykey)):
5425                                         print colorize("BAD", "\n*** You need to adjust PKGDIR to emerge this package.\n")
5426                                         return 0, myfavorites
5427                                 db_keys = list(bindb._aux_cache_keys)
5428                                 metadata = izip(db_keys, bindb.aux_get(mykey, db_keys))
5429                                 pkg = Package(type_name="binary", root_config=root_config,
5430                                         cpv=mykey, built=True, metadata=metadata,
5431                                         onlydeps=onlydeps)
5432                                 self._pkg_cache[pkg] = pkg
5433                                 args.append(PackageArg(arg=x, package=pkg,
5434                                         root_config=root_config))
5435                         elif ext==".ebuild":
5436                                 ebuild_path = portage.util.normalize_path(os.path.abspath(x))
5437                                 pkgdir = os.path.dirname(ebuild_path)
5438                                 tree_root = os.path.dirname(os.path.dirname(pkgdir))
5439                                 cp = pkgdir[len(tree_root)+1:]
5440                                 e = portage.exception.PackageNotFound(
5441                                         ("%s is not in a valid portage tree " + \
5442                                         "hierarchy or does not exist") % x)
5443                                 if not portage.isvalidatom(cp):
5444                                         raise e
5445                                 cat = portage.catsplit(cp)[0]
5446                                 mykey = cat + "/" + os.path.basename(ebuild_path[:-7])
5447                                 if not portage.isvalidatom("="+mykey):
5448                                         raise e
5449                                 ebuild_path = portdb.findname(mykey)
5450                                 if ebuild_path:
5451                                         if ebuild_path != os.path.join(os.path.realpath(tree_root),
5452                                                 cp, os.path.basename(ebuild_path)):
5453                                                 print colorize("BAD", "\n*** You need to adjust PORTDIR or PORTDIR_OVERLAY to emerge this package.\n")
5454                                                 return 0, myfavorites
5455                                         if mykey not in portdb.xmatch(
5456                                                 "match-visible", portage.dep_getkey(mykey)):
5457                                                 print colorize("BAD", "\n*** You are emerging a masked package. It is MUCH better to use")
5458                                                 print colorize("BAD", "*** /etc/portage/package.* to accomplish this. See portage(5) man")
5459                                                 print colorize("BAD", "*** page for details.")
5460                                                 countdown(int(self.settings["EMERGE_WARNING_DELAY"]),
5461                                                         "Continuing...")
5462                                 else:
5463                                         raise portage.exception.PackageNotFound(
5464                                                 "%s is not in a valid portage tree hierarchy or does not exist" % x)
5465                                 db_keys = list(portdb._aux_cache_keys)
5466                                 metadata = izip(db_keys, portdb.aux_get(mykey, db_keys))
5467                                 pkg = Package(type_name="ebuild", root_config=root_config,
5468                                         cpv=mykey, metadata=metadata, onlydeps=onlydeps)
5469                                 pkgsettings.setcpv(pkg)
5470                                 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
5471                                 self._pkg_cache[pkg] = pkg
5472                                 args.append(PackageArg(arg=x, package=pkg,
5473                                         root_config=root_config))
5474                         elif x.startswith(os.path.sep):
5475                                 if not x.startswith(myroot):
5476                                         portage.writemsg(("\n\n!!! '%s' does not start with" + \
5477                                                 " $ROOT.\n") % x, noiselevel=-1)
5478                                         return 0, []
5479                                 # Queue these up since it's most efficient to handle
5480                                 # multiple files in a single iter_owners() call.
5481                                 lookup_owners.append(x)
5482                         else:
5483                                 if x in ("system", "world"):
5484                                         x = SETPREFIX + x
5485                                 if x.startswith(SETPREFIX):
5486                                         s = x[len(SETPREFIX):]
5487                                         if s not in sets:
5488                                                 raise portage.exception.PackageSetNotFound(s)
5489                                         if s in self._sets:
5490                                                 continue
5491                                         # Recursively expand sets so that containment tests in
5492                                         # self._get_parent_sets() properly match atoms in nested
5493                                         # sets (like if world contains system).
5494                                         expanded_set = InternalPackageSet(
5495                                                 initial_atoms=getSetAtoms(s))
5496                                         self._sets[s] = expanded_set
5497                                         args.append(SetArg(arg=x, set=expanded_set,
5498                                                 root_config=root_config))
5499                                         continue
5500                                 if not is_valid_package_atom(x):
5501                                         portage.writemsg("\n\n!!! '%s' is not a valid package atom.\n" % x,
5502                                                 noiselevel=-1)
5503                                         portage.writemsg("!!! Please check ebuild(5) for full details.\n")
5504                                         portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
5505                                         return (0,[])
5506                                 # Don't expand categories or old-style virtuals here unless
5507                                 # necessary. Expansion of old-style virtuals here causes at
5508                                 # least the following problems:
5509                                 #   1) It's more difficult to determine which set(s) an atom
5510                                 #      came from, if any.
5511                                 #   2) It takes away freedom from the resolver to choose other
5512                                 #      possible expansions when necessary.
5513                                 if "/" in x:
5514                                         args.append(AtomArg(arg=x, atom=x,
5515                                                 root_config=root_config))
5516                                         continue
5517                                 expanded_atoms = self._dep_expand(root_config, x)
5518                                 installed_cp_set = set()
5519                                 for atom in expanded_atoms:
5520                                         atom_cp = portage.dep_getkey(atom)
5521                                         if vardb.cp_list(atom_cp):
5522                                                 installed_cp_set.add(atom_cp)
5523                                 if len(expanded_atoms) > 1 and len(installed_cp_set) == 1:
5524                                         installed_cp = iter(installed_cp_set).next()
5525                                         expanded_atoms = [atom for atom in expanded_atoms \
5526                                                 if portage.dep_getkey(atom) == installed_cp]
5527
5528                                 if len(expanded_atoms) > 1:
5529                                         print
5530                                         print
5531                                         ambiguous_package_name(x, expanded_atoms, root_config,
5532                                                 self.spinner, self.myopts)
5533                                         return False, myfavorites
5534                                 if expanded_atoms:
5535                                         atom = expanded_atoms[0]
5536                                 else:
5537                                         null_atom = insert_category_into_atom(x, "null")
5538                                         null_cp = portage.dep_getkey(null_atom)
5539                                         cat, atom_pn = portage.catsplit(null_cp)
5540                                         virts_p = root_config.settings.get_virts_p().get(atom_pn)
5541                                         if virts_p:
5542                                                 # Allow the depgraph to choose which virtual.
5543                                                 atom = insert_category_into_atom(x, "virtual")
5544                                         else:
5545                                                 atom = insert_category_into_atom(x, "null")
5546
5547                                 args.append(AtomArg(arg=x, atom=atom,
5548                                         root_config=root_config))
5549
5550                 if lookup_owners:
5551                         relative_paths = []
5552                         search_for_multiple = False
5553                         if len(lookup_owners) > 1:
5554                                 search_for_multiple = True
5555
5556                         for x in lookup_owners:
5557                                 if not search_for_multiple and os.path.isdir(x):
5558                                         search_for_multiple = True
5559                                 relative_paths.append(x[len(myroot):])
5560
5561                         owners = set()
5562                         for pkg, relative_path in \
5563                                 real_vardb._owners.iter_owners(relative_paths):
5564                                 owners.add(pkg.mycpv)
5565                                 if not search_for_multiple:
5566                                         break
5567
5568                         if not owners:
5569                                 portage.writemsg(("\n\n!!! '%s' is not claimed " + \
5570                                         "by any package.\n") % lookup_owners[0], noiselevel=-1)
5571                                 return 0, []
5572
5573                         for cpv in owners:
5574                                 slot = vardb.aux_get(cpv, ["SLOT"])[0]
5575                                 if not slot:
5576                                         # portage now masks packages with missing slot, but it's
5577                                         # possible that one was installed by an older version
5578                                         atom = portage.cpv_getkey(cpv)
5579                                 else:
5580                                         atom = "%s:%s" % (portage.cpv_getkey(cpv), slot)
5581                                 args.append(AtomArg(arg=atom, atom=atom,
5582                                         root_config=root_config))
5583
5584                 if "--update" in self.myopts:
5585                         # In some cases, the greedy slots behavior can pull in a slot that
5586                         # the user would want to uninstall due to it being blocked by a
5587                         # newer version in a different slot. Therefore, it's necessary to
5588                         # detect and discard any that should be uninstalled. Each time
5589                         # that arguments are updated, package selections are repeated in
5590                         # order to ensure consistency with the current arguments:
5591                         #
5592                         #  1) Initialize args
5593                         #  2) Select packages and generate initial greedy atoms
5594                         #  3) Update args with greedy atoms
5595                         #  4) Select packages and generate greedy atoms again, while
5596                         #     accounting for any blockers between selected packages
5597                         #  5) Update args with revised greedy atoms
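                             # Rough illustration with a hypothetical argument: for the
                             # atom "dev-lang/python", step 2 may add a greedy atom such
                             # as "dev-lang/python:2.5" for another installed slot; step 4
                             # can drop it again if the package selected for that slot is
                             # blocked by one of the other selected packages.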
5598
5599                         self._set_args(args)
5600                         greedy_args = []
5601                         for arg in args:
5602                                 greedy_args.append(arg)
5603                                 if not isinstance(arg, AtomArg):
5604                                         continue
5605                                 for atom in self._greedy_slots(arg.root_config, arg.atom):
5606                                         greedy_args.append(
5607                                                 AtomArg(arg=arg.arg, atom=atom,
5608                                                         root_config=arg.root_config))
5609
5610                         self._set_args(greedy_args)
5611                         del greedy_args
5612
5613                         # Revise greedy atoms, accounting for any blockers
5614                         # between selected packages.
5615                         revised_greedy_args = []
5616                         for arg in args:
5617                                 revised_greedy_args.append(arg)
5618                                 if not isinstance(arg, AtomArg):
5619                                         continue
5620                                 for atom in self._greedy_slots(arg.root_config, arg.atom,
5621                                         blocker_lookahead=True):
5622                                         revised_greedy_args.append(
5623                                                 AtomArg(arg=arg.arg, atom=atom,
5624                                                         root_config=arg.root_config))
5625                         args = revised_greedy_args
5626                         del revised_greedy_args
5627
5628                 self._set_args(args)
5629
5630                 myfavorites = set(myfavorites)
5631                 for arg in args:
5632                         if isinstance(arg, (AtomArg, PackageArg)):
5633                                 myfavorites.add(arg.atom)
5634                         elif isinstance(arg, SetArg):
5635                                 myfavorites.add(arg.arg)
5636                 myfavorites = list(myfavorites)
5637
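                     # pprovideddict maps each category/package name to the
                     # versions listed in package.provided, so that arguments
                     # already satisfied by package.provided can be reported
                     # instead of merged.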
5638                 pprovideddict = pkgsettings.pprovideddict
5639                 if debug:
5640                         portage.writemsg("\n", noiselevel=-1)
5641                 # Order needs to be preserved since a feature of --nodeps
5642                 # is to allow the user to force a specific merge order.
5643                 args.reverse()
5644                 while args:
5645                         arg = args.pop()
5646                         for atom in arg.set:
5647                                 self.spinner.update()
5648                                 dep = Dependency(atom=atom, onlydeps=onlydeps,
5649                                         root=myroot, parent=arg)
5650                                 atom_cp = portage.dep_getkey(atom)
5651                                 try:
5652                                         pprovided = pprovideddict.get(portage.dep_getkey(atom))
5653                                         if pprovided and portage.match_from_list(atom, pprovided):
5654                                                 # A provided package has been specified on the command line.
5655                                                 self._pprovided_args.append((arg, atom))
5656                                                 continue
5657                                         if isinstance(arg, PackageArg):
5658                                                 if not self._add_pkg(arg.package, dep) or \
5659                                                         not self._create_graph():
5660                                                         sys.stderr.write(("\n\n!!! Problem resolving " + \
5661                                                                 "dependencies for %s\n") % arg.arg)
5662                                                         return 0, myfavorites
5663                                                 continue
5664                                         if debug:
5665                                                 portage.writemsg("      Arg: %s\n     Atom: %s\n" % \
5666                                                         (arg, atom), noiselevel=-1)
5667                                         pkg, existing_node = self._select_package(
5668                                                 myroot, atom, onlydeps=onlydeps)
5669                                         if not pkg:
5670                                                 if not (isinstance(arg, SetArg) and \
5671                                                         arg.name in ("system", "world")):
5672                                                         self._unsatisfied_deps_for_display.append(
5673                                                                 ((myroot, atom), {}))
5674                                                         return 0, myfavorites
5675                                                 self._missing_args.append((arg, atom))
5676                                                 continue
5677                                         if atom_cp != pkg.cp:
5678                                                 # For old-style virtuals, we need to repeat the
5679                                                 # package.provided check against the selected package.
5680                                                 expanded_atom = atom.replace(atom_cp, pkg.cp)
5681                                                 pprovided = pprovideddict.get(pkg.cp)
5682                                                 if pprovided and \
5683                                                         portage.match_from_list(expanded_atom, pprovided):
5684                                                         # A provided package has been
5685                                                         # specified on the command line.
5686                                                         self._pprovided_args.append((arg, atom))
5687                                                         continue
5688                                         if pkg.installed and "selective" not in self.myparams:
5689                                                 self._unsatisfied_deps_for_display.append(
5690                                                         ((myroot, atom), {}))
5691                                                 # Previous behavior was to bail out in this case, but
5692                                                 # since the dep is satisfied by the installed package,
5693                                                 # it's more friendly to continue building the graph
5694                                                 # and just show a warning message. Therefore, only bail
5695                                                 # out here if the atom is not from either the system or
5696                                                 # world set.
5697                                                 if not (isinstance(arg, SetArg) and \
5698                                                         arg.name in ("system", "world")):
5699                                                         return 0, myfavorites
5700
5701                                         # Add the selected package to the graph as soon as possible
5702                                         # so that later dep_check() calls can use it as feedback
5703                                         # for making more consistent atom selections.
5704                                         if not self._add_pkg(pkg, dep):
5705                                                 if isinstance(arg, SetArg):
5706                                                         sys.stderr.write(("\n\n!!! Problem resolving " + \
5707                                                                 "dependencies for %s from %s\n") % \
5708                                                                 (atom, arg.arg))
5709                                                 else:
5710                                                         sys.stderr.write(("\n\n!!! Problem resolving " + \
5711                                                                 "dependencies for %s\n") % atom)
5712                                                 return 0, myfavorites
5713
5714                                 except portage.exception.MissingSignature, e:
5715                                         portage.writemsg("\n\n!!! A missing gpg signature is preventing portage from calculating the\n")
5716                                         portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
5717                                         portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
5718                                         portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
5719                                         portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
5720                                         return 0, myfavorites
5721                                 except portage.exception.InvalidSignature, e:
5722                                         portage.writemsg("\n\n!!! An invalid gpg signature is preventing portage from calculating the\n")
5723                                         portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
5724                                         portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
5725                                         portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
5726                                         portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
5727                                         return 0, myfavorites
5728                                 except SystemExit, e:
5729                                         raise # Needed else can't exit
5730                                 except Exception, e:
5731                                         print >> sys.stderr, "\n\n!!! Problem in '%s' dependencies." % atom
5732                                         print >> sys.stderr, "!!!", str(e), getattr(e, "__module__", None)
5733                                         raise
5734
5735                 # Now that the root packages have been added to the graph,
5736                 # process the dependencies.
5737                 if not self._create_graph():
5738                         return 0, myfavorites
5739
5740                 missing = 0
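                     # With --usepkgonly, report any packages that are
                     # scheduled for merge but have no corresponding binary
                     # package.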
5741                 if "--usepkgonly" in self.myopts:
5742                         for xs in self.digraph.all_nodes():
5743                                 if not isinstance(xs, Package):
5744                                         continue
5745                                 if len(xs) >= 4 and xs[0] != "binary" and xs[3] == "merge":
5746                                         if missing == 0:
5747                                                 print
5748                                         missing += 1
5749                                         print "Missing binary for:", xs[2]
5750
5751                 try:
5752                         self.altlist()
5753                 except self._unknown_internal_error:
5754                         return False, myfavorites
5755
5756                 # Return True unless binaries are missing.
5757                 return (not missing, myfavorites)
5758
5759         def _set_args(self, args):
5760                 """
5761                 Create the "args" package set from atoms and packages given as
5762                 arguments. This method can be called multiple times if necessary.
5763                 The package selection cache is automatically invalidated, since
5764                 arguments influence package selections.
5765                 """
5766                 args_set = self._sets["args"]
5767                 args_set.clear()
5768                 for arg in args:
5769                         if not isinstance(arg, (AtomArg, PackageArg)):
5770                                 continue
5771                         atom = arg.atom
5772                         if atom in args_set:
5773                                 continue
5774                         args_set.add(atom)
5775
5776                 self._set_atoms.clear()
5777                 self._set_atoms.update(chain(*self._sets.itervalues()))
5778                 atom_arg_map = self._atom_arg_map
5779                 atom_arg_map.clear()
5780                 for arg in args:
5781                         for atom in arg.set:
5782                                 atom_key = (atom, arg.root_config.root)
5783                                 refs = atom_arg_map.get(atom_key)
5784                                 if refs is None:
5785                                         refs = []
5786                                         atom_arg_map[atom_key] = refs
5787                                 if arg not in refs:
5788                                         refs.append(arg)
5789
5790                 # Invalidate the package selection cache, since
5791                 # arguments influence package selections.
5792                 self._highest_pkg_cache.clear()
5793                 for trees in self._filtered_trees.itervalues():
5794                         trees["porttree"].dbapi._clear_cache()
5795
5796         def _greedy_slots(self, root_config, atom, blocker_lookahead=False):
5797                 """
5798                 Return a list of slot atoms corresponding to installed slots that
5799                 differ from the slot of the highest visible match. When
5800                 blocker_lookahead is True, slot atoms that would trigger a blocker
5801                 conflict are automatically discarded, potentially allowing automatic
5802                 uninstallation of older slots when appropriate.
5803                 """
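                     # Illustrative example (hypothetical package names): if the
                     # highest visible match for 'sys-devel/gcc' is gcc-4.3 in
                     # slot 4.3 while gcc-4.1 is installed in slot 4.1, this
                     # returns something like [Atom('sys-devel/gcc:4.1')] so the
                     # older slot is also brought up to date.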
5804                 highest_pkg, in_graph = self._select_package(root_config.root, atom)
5805                 if highest_pkg is None:
5806                         return []
5807                 vardb = root_config.trees["vartree"].dbapi
5808                 slots = set()
5809                 for cpv in vardb.match(atom):
5810                         # don't mix new virtuals with old virtuals
5811                         if portage.cpv_getkey(cpv) == highest_pkg.cp:
5812                                 slots.add(vardb.aux_get(cpv, ["SLOT"])[0])
5813
5814                 slots.add(highest_pkg.metadata["SLOT"])
5815                 if len(slots) == 1:
5816                         return []
5817                 greedy_pkgs = []
5818                 slots.remove(highest_pkg.metadata["SLOT"])
5819                 while slots:
5820                         slot = slots.pop()
5821                         slot_atom = portage.dep.Atom("%s:%s" % (highest_pkg.cp, slot))
5822                         pkg, in_graph = self._select_package(root_config.root, slot_atom)
5823                         if pkg is not None and \
5824                                 pkg.cp == highest_pkg.cp and pkg < highest_pkg:
5825                                 greedy_pkgs.append(pkg)
5826                 if not greedy_pkgs:
5827                         return []
5828                 if not blocker_lookahead:
5829                         return [pkg.slot_atom for pkg in greedy_pkgs]
5830
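                     # Collect the blocker atoms declared in the *DEPEND of each
                     # candidate (and of highest_pkg itself), so slot atoms that
                     # would introduce a blocker conflict can be discarded below.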
5831                 blockers = {}
5832                 blocker_dep_keys = ["DEPEND", "PDEPEND", "RDEPEND"]
5833                 for pkg in greedy_pkgs + [highest_pkg]:
5834                         dep_str = " ".join(pkg.metadata[k] for k in blocker_dep_keys)
5835                         try:
5836                                 atoms = self._select_atoms(
5837                                         pkg.root, dep_str, pkg.use.enabled,
5838                                         parent=pkg, strict=True)
5839                         except portage.exception.InvalidDependString:
5840                                 continue
5841                         blocker_atoms = (x for x in atoms if x.blocker)
5842                         blockers[pkg] = InternalPackageSet(initial_atoms=blocker_atoms)
5843
5844                 if highest_pkg not in blockers:
5845                         return []
5846
5847                 # filter packages with invalid deps
5848                 greedy_pkgs = [pkg for pkg in greedy_pkgs if pkg in blockers]
5849
5850                 # filter packages that conflict with highest_pkg
5851                 greedy_pkgs = [pkg for pkg in greedy_pkgs if not \
5852                         (blockers[highest_pkg].findAtomForPackage(pkg) or \
5853                         blockers[pkg].findAtomForPackage(highest_pkg))]
5854
5855                 if not greedy_pkgs:
5856                         return []
5857
5858                 # If two packages conflict, discard the lower version.
5859                 discard_pkgs = set()
5860                 greedy_pkgs.sort(reverse=True)
5861                 for i in xrange(len(greedy_pkgs) - 1):
5862                         pkg1 = greedy_pkgs[i]
5863                         if pkg1 in discard_pkgs:
5864                                 continue
5865                         for j in xrange(i + 1, len(greedy_pkgs)):
5866                                 pkg2 = greedy_pkgs[j]
5867                                 if pkg2 in discard_pkgs:
5868                                         continue
5869                                 if blockers[pkg1].findAtomForPackage(pkg2) or \
5870                                         blockers[pkg2].findAtomForPackage(pkg1):
5871                                         # pkg1 > pkg2
5872                                         discard_pkgs.add(pkg2)
5873
5874                 return [pkg.slot_atom for pkg in greedy_pkgs \
5875                         if pkg not in discard_pkgs]
5876
5877         def _select_atoms_from_graph(self, *pargs, **kwargs):
5878                 """
5879                 Prefer atoms matching packages that have already been
5880                 added to the graph or those that are installed and have
5881                 not been scheduled for replacement.
5882                 """
5883                 kwargs["trees"] = self._graph_trees
5884                 return self._select_atoms_highest_available(*pargs, **kwargs)
5885
5886         def _select_atoms_highest_available(self, root, depstring,
5887                 myuse=None, parent=None, strict=True, trees=None, priority=None):
5888                 """This will raise InvalidDependString if necessary. If trees is
5889                 None then self._filtered_trees is used."""
5890                 pkgsettings = self.pkgsettings[root]
5891                 if trees is None:
5892                         trees = self._filtered_trees
5893                 if not getattr(priority, "buildtime", False):
5894                         # The parent should only be passed to dep_check() for buildtime
5895                         # dependencies since that's the only case when it's appropriate
5896                         # to trigger the circular dependency avoidance code which uses it.
5897                         # It's important not to trigger the same circular dependency
5898                         # avoidance code for runtime dependencies since it's not needed
5899                         # and it can promote an incorrect package choice.
5900                         parent = None
5901                 if True:
5902                         try:
5903                                 if parent is not None:
5904                                         trees[root]["parent"] = parent
5905                                 if not strict:
5906                                         portage.dep._dep_check_strict = False
5907                                 mycheck = portage.dep_check(depstring, None,
5908                                         pkgsettings, myuse=myuse,
5909                                         myroot=root, trees=trees)
5910                         finally:
5911                                 if parent is not None:
5912                                         trees[root].pop("parent")
5913                                 portage.dep._dep_check_strict = True
5914                         if not mycheck[0]:
5915                                 raise portage.exception.InvalidDependString(mycheck[1])
5916                         selected_atoms = mycheck[1]
5917                 return selected_atoms
5918
5919         def _show_unsatisfied_dep(self, root, atom, myparent=None, arg=None):
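                     """
                     Explain why the given atom cannot be satisfied: list
                     candidates that only lack the requested USE flags, list
                     masked candidates, or report that no ebuilds exist at all,
                     and then show the chain of parents and the argument that
                     pulled the atom in.
                     """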
5920                 atom = portage.dep.Atom(atom)
5921                 atom_set = InternalPackageSet(initial_atoms=(atom,))
5922                 atom_without_use = atom
5923                 if atom.use:
5924                         atom_without_use = portage.dep.remove_slot(atom)
5925                         if atom.slot:
5926                                 atom_without_use += ":" + atom.slot
5927                         atom_without_use = portage.dep.Atom(atom_without_use)
5928                 xinfo = '"%s"' % atom
5929                 if arg:
5930                         xinfo = '"%s"' % arg
5931                 # Discard null/ from failed cpv_expand category expansion.
5932                 xinfo = xinfo.replace("null/", "")
5933                 masked_packages = []
5934                 missing_use = []
5935                 missing_licenses = []
5936                 have_eapi_mask = False
5937                 pkgsettings = self.pkgsettings[root]
5938                 implicit_iuse = pkgsettings._get_implicit_iuse()
5939                 root_config = self.roots[root]
5940                 portdb = self.roots[root].trees["porttree"].dbapi
5941                 dbs = self._filtered_trees[root]["dbs"]
5942                 for db, pkg_type, built, installed, db_keys in dbs:
5943                         if installed:
5944                                 continue
5945                         match = db.match
5946                         if hasattr(db, "xmatch"):
5947                                 cpv_list = db.xmatch("match-all", atom_without_use)
5948                         else:
5949                                 cpv_list = db.match(atom_without_use)
5950                         # descending order
5951                         cpv_list.reverse()
5952                         for cpv in cpv_list:
5953                                 metadata, mreasons = get_mask_info(root_config, cpv,
5954                                         pkgsettings, db, pkg_type, built, installed, db_keys)
5955                                 if metadata is not None:
5956                                         pkg = Package(built=built, cpv=cpv,
5957                                                 installed=installed, metadata=metadata,
5958                                                 root_config=root_config)
5959                                         if pkg.cp != atom.cp:
5960                                                 # A cpv can be returned from dbapi.match() as an
5961                                                 # old-style virtual match even in cases when the
5962                                                 # package does not actually PROVIDE the virtual.
5963                                                 # Filter out any such false matches here.
5964                                                 if not atom_set.findAtomForPackage(pkg):
5965                                                         continue
5966                                         if atom.use and not mreasons:
5967                                                 missing_use.append(pkg)
5968                                                 continue
5969                                 masked_packages.append(
5970                                         (root_config, pkgsettings, cpv, metadata, mreasons))
5971
5972                 missing_use_reasons = []
5973                 missing_iuse_reasons = []
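                     # For each candidate rejected only because of USE deps,
                     # work out whether the required flags are missing from
                     # IUSE entirely or are merely set the wrong way.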
5974                 for pkg in missing_use:
5975                         use = pkg.use.enabled
5976                         iuse = implicit_iuse.union(re.escape(x) for x in pkg.iuse.all)
5977                         iuse_re = re.compile("^(%s)$" % "|".join(iuse))
5978                         missing_iuse = []
5979                         for x in atom.use.required:
5980                                 if iuse_re.match(x) is None:
5981                                         missing_iuse.append(x)
5982                         mreasons = []
5983                         if missing_iuse:
5984                                 mreasons.append("Missing IUSE: %s" % " ".join(missing_iuse))
5985                                 missing_iuse_reasons.append((pkg, mreasons))
5986                         else:
5987                                 need_enable = sorted(atom.use.enabled.difference(use))
5988                                 need_disable = sorted(atom.use.disabled.intersection(use))
5989                                 if need_enable or need_disable:
5990                                         changes = []
5991                                         changes.extend(colorize("red", "+" + x) \
5992                                                 for x in need_enable)
5993                                         changes.extend(colorize("blue", "-" + x) \
5994                                                 for x in need_disable)
5995                                         mreasons.append("Change USE: %s" % " ".join(changes))
5996                                         missing_use_reasons.append((pkg, mreasons))
5997
5998                 if missing_iuse_reasons and not missing_use_reasons:
5999                         missing_use_reasons = missing_iuse_reasons
6000                 elif missing_use_reasons:
6001                         # Only show the latest version.
6002                         del missing_use_reasons[1:]
6003
6004                 if missing_use_reasons:
6005                         print "\nemerge: there are no ebuilds built with USE flags to satisfy "+green(xinfo)+"."
6006                         print "!!! One of the following packages is required to complete your request:"
6007                         for pkg, mreasons in missing_use_reasons:
6008                                 print "- "+pkg.cpv+" ("+", ".join(mreasons)+")"
6009
6010                 elif masked_packages:
6011                         print "\n!!! " + \
6012                                 colorize("BAD", "All ebuilds that could satisfy ") + \
6013                                 colorize("INFORM", xinfo) + \
6014                                 colorize("BAD", " have been masked.")
6015                         print "!!! One of the following masked packages is required to complete your request:"
6016                         have_eapi_mask = show_masked_packages(masked_packages)
6017                         if have_eapi_mask:
6018                                 print
6019                                 msg = ("The current version of portage supports " + \
6020                                         "EAPI '%s'. You must upgrade to a newer version" + \
6021                                         " of portage before EAPI masked packages can" + \
6022                                         " be installed.") % portage.const.EAPI
6023                                 from textwrap import wrap
6024                                 for line in wrap(msg, 75):
6025                                         print line
6026                         print
6027                         show_mask_docs()
6028                 else:
6029                         print "\nemerge: there are no ebuilds to satisfy "+green(xinfo)+"."
6030
6031                 # Show parent nodes and the argument that pulled them in.
6032                 traversed_nodes = set()
6033                 node = myparent
6034                 msg = []
6035                 while node is not None:
6036                         traversed_nodes.add(node)
6037                         msg.append('(dependency required by "%s" [%s])' % \
6038                                 (colorize('INFORM', str(node.cpv)), node.type_name))
6039                         # When traversing to parents, prefer arguments over packages
6040                         # since arguments are root nodes. Never traverse the same
6041                         # package twice, in order to prevent an infinite loop.
6042                         selected_parent = None
6043                         for parent in self.digraph.parent_nodes(node):
6044                                 if isinstance(parent, DependencyArg):
6045                                         msg.append('(dependency required by "%s" [argument])' % \
6046                                                 (colorize('INFORM', str(parent))))
6047                                         selected_parent = None
6048                                         break
6049                                 if parent not in traversed_nodes:
6050                                         selected_parent = parent
6051                         node = selected_parent
6052                 for line in msg:
6053                         print line
6054
6055                 print
6056
6057         def _select_pkg_highest_available(self, root, atom, onlydeps=False):
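                     """
                     Memoizing wrapper around _select_pkg_highest_available_imp().
                     Results are cached by (root, atom, onlydeps), and a visible
                     selected package is additionally registered in
                     root_config.visible_pkgs as a side effect.
                     """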
6058                 cache_key = (root, atom, onlydeps)
6059                 ret = self._highest_pkg_cache.get(cache_key)
6060                 if ret is not None:
6061                         pkg, existing = ret
6062                         if pkg and not existing:
6063                                 existing = self._slot_pkg_map[root].get(pkg.slot_atom)
6064                                 if existing and existing == pkg:
6065                                         # Update the cache to reflect that the
6066                                         # package has been added to the graph.
6067                                         ret = pkg, pkg
6068                                         self._highest_pkg_cache[cache_key] = ret
6069                         return ret
6070                 ret = self._select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)
6071                 self._highest_pkg_cache[cache_key] = ret
6072                 pkg, existing = ret
6073                 if pkg is not None:
6074                         settings = pkg.root_config.settings
6075                         if visible(settings, pkg) and not (pkg.installed and \
6076                                 settings._getMissingKeywords(pkg.cpv, pkg.metadata)):
6077                                 pkg.root_config.visible_pkgs.cpv_inject(pkg)
6078                 return ret
6079
6080         def _select_pkg_highest_available_imp(self, root, atom, onlydeps=False):
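                     """
                     Select the highest available package matching the given
                     atom from the filtered dbs of this root, honoring options
                     such as --usepkgonly, --noreplace, --newuse and
                     --reinstall. Returns a (package, existing_node) tuple where
                     existing_node is a matching package that has already been
                     added to the graph, if any.
                     """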
6081                 root_config = self.roots[root]
6082                 pkgsettings = self.pkgsettings[root]
6083                 dbs = self._filtered_trees[root]["dbs"]
6084                 vardb = self.roots[root].trees["vartree"].dbapi
6085                 portdb = self.roots[root].trees["porttree"].dbapi
6086                 # List of acceptable packages, ordered by type preference.
6087                 matched_packages = []
6088                 highest_version = None
6089                 if not isinstance(atom, portage.dep.Atom):
6090                         atom = portage.dep.Atom(atom)
6091                 atom_cp = atom.cp
6092                 atom_set = InternalPackageSet(initial_atoms=(atom,))
6093                 existing_node = None
6094                 myeb = None
6095                 usepkgonly = "--usepkgonly" in self.myopts
6096                 empty = "empty" in self.myparams
6097                 selective = "selective" in self.myparams
6098                 reinstall = False
6099                 noreplace = "--noreplace" in self.myopts
6100                 # Behavior of the "selective" parameter depends on
6101                 # whether or not a package matches an argument atom.
6102                 # If an installed package provides an old-style
6103                 # virtual that is no longer provided by an available
6104                 # package, the installed package may match an argument
6105                 # atom even though none of the available packages do.
6106                 # Therefore, "selective" logic does not consider
6107                 # whether or not an installed package matches an
6108                 # argument atom. It only considers whether or not
6109                 # available packages match argument atoms, which is
6110                 # represented by the found_available_arg flag.
6111                 found_available_arg = False
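                     # Two passes: the first pass prefers a matching package
                     # that has already been added to the graph (an existing
                     # node), and the second pass considers all available
                     # packages.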
6112                 for find_existing_node in True, False:
6113                         if existing_node:
6114                                 break
6115                         for db, pkg_type, built, installed, db_keys in dbs:
6116                                 if existing_node:
6117                                         break
6118                                 if installed and not find_existing_node:
6119                                         want_reinstall = reinstall or empty or \
6120                                                 (found_available_arg and not selective)
6121                                         if want_reinstall and matched_packages:
6122                                                 continue
6123                                 if hasattr(db, "xmatch"):
6124                                         cpv_list = db.xmatch("match-all", atom)
6125                                 else:
6126                                         cpv_list = db.match(atom)
6127
6128                                 # USE=multislot can make an installed package appear as if
6129                                 # it doesn't satisfy a slot dependency. Rebuilding the ebuild
6130                                 # won't do any good as long as USE=multislot is enabled since
6131                                 # the newly built package still won't have the expected slot.
6132                                 # Therefore, assume that such SLOT dependencies are already
6133                                 # satisfied rather than forcing a rebuild.
6134                                 if installed and not cpv_list and atom.slot:
6135                                         for cpv in db.match(atom.cp):
6136                                                 slot_available = False
6137                                                 for other_db, other_type, other_built, \
6138                                                         other_installed, other_keys in dbs:
6139                                                         try:
6140                                                                 if atom.slot == \
6141                                                                         other_db.aux_get(cpv, ["SLOT"])[0]:
6142                                                                         slot_available = True
6143                                                                         break
6144                                                         except KeyError:
6145                                                                 pass
6146                                                 if not slot_available:
6147                                                         continue
6148                                                 inst_pkg = self._pkg(cpv, "installed",
6149                                                         root_config, installed=installed)
6150                                                 # Remove the slot from the atom and verify that
6151                                                 # the package matches the resulting atom.
6152                                                 atom_without_slot = portage.dep.remove_slot(atom)
6153                                                 if atom.use:
6154                                                         atom_without_slot += str(atom.use)
6155                                                 atom_without_slot = portage.dep.Atom(atom_without_slot)
6156                                                 if portage.match_from_list(
6157                                                         atom_without_slot, [inst_pkg]):
6158                                                         cpv_list = [inst_pkg.cpv]
6159                                                 break
6160
6161                                 if not cpv_list:
6162                                         continue
6163                                 pkg_status = "merge"
6164                                 if installed or onlydeps:
6165                                         pkg_status = "nomerge"
6166                                 # descending order
6167                                 cpv_list.reverse()
6168                                 for cpv in cpv_list:
6169                                         # Make --noreplace take precedence over --newuse.
6170                                         if not installed and noreplace and \
6171                                                 cpv in vardb.match(atom):
6172                                                 # If the installed version is masked, it may
6173                                                 # be necessary to look at lower versions,
6174                                                 # in case there is a visible downgrade.
6175                                                 continue
6176                                         reinstall_for_flags = None
6177                                         cache_key = (pkg_type, root, cpv, pkg_status)
6178                                         calculated_use = True
6179                                         pkg = self._pkg_cache.get(cache_key)
6180                                         if pkg is None:
6181                                                 calculated_use = False
6182                                                 try:
6183                                                         metadata = izip(db_keys, db.aux_get(cpv, db_keys))
6184                                                 except KeyError:
6185                                                         continue
6186                                                 pkg = Package(built=built, cpv=cpv,
6187                                                         installed=installed, metadata=metadata,
6188                                                         onlydeps=onlydeps, root_config=root_config,
6189                                                         type_name=pkg_type)
6190                                                 metadata = pkg.metadata
6191                                                 if not built and ("?" in metadata["LICENSE"] or \
6192                                                         "?" in metadata["PROVIDE"]):
6193                                                         # This is avoided whenever possible because
6194                                                         # it's expensive. It only needs to be done here
6195                                                         # if it has an effect on visibility.
6196                                                         pkgsettings.setcpv(pkg)
6197                                                         metadata["USE"] = pkgsettings["PORTAGE_USE"]
6198                                                         calculated_use = True
6199                                                 self._pkg_cache[pkg] = pkg
6200
6201                                         if not installed or (built and matched_packages):
6202                                                 # Only enforce visibility on installed packages
6203                                                 # if there is at least one other visible package
6204                                                 # available. By filtering installed masked packages
6205                                                 # here, packages that have been masked since they
6206                                                 # were installed can be automatically downgraded
6207                                                 # to an unmasked version.
6208                                                 try:
6209                                                         if not visible(pkgsettings, pkg):
6210                                                                 continue
6211                                                 except portage.exception.InvalidDependString:
6212                                                         if not installed:
6213                                                                 continue
6214
6215                                                 # Enable upgrade or downgrade to a version
6216                                                 # with visible KEYWORDS when the installed
6217                                                 # version is masked by KEYWORDS, but never
6218                                                 # reinstall the same exact version only due
6219                                                 # to a KEYWORDS mask.
6220                                                 if built and matched_packages:
6221
6222                                                         different_version = None
6223                                                         for avail_pkg in matched_packages:
6224                                                                 if not portage.dep.cpvequal(
6225                                                                         pkg.cpv, avail_pkg.cpv):
6226                                                                         different_version = avail_pkg
6227                                                                         break
6228                                                         if different_version is not None:
6229
6230                                                                 if installed and \
6231                                                                         pkgsettings._getMissingKeywords(
6232                                                                         pkg.cpv, pkg.metadata):
6233                                                                         continue
6234
6235                                                                 # If the ebuild no longer exists or its
6236                                                                 # keywords have been dropped, reject built
6237                                                                 # instances (installed or binary).
6238                                                                 # If --usepkgonly is enabled, assume that
6239                                                                 # the ebuild status should be ignored.
6240                                                                 if not usepkgonly:
6241                                                                         try:
6242                                                                                 pkg_eb = self._pkg(
6243                                                                                         pkg.cpv, "ebuild", root_config)
6244                                                                         except portage.exception.PackageNotFound:
6245                                                                                 continue
6246                                                                         else:
6247                                                                                 if not visible(pkgsettings, pkg_eb):
6248                                                                                         continue
6249
6250                                         if not pkg.built and not calculated_use:
6251                                                 # This is avoided whenever possible because
6252                                                 # it's expensive.
6253                                                 pkgsettings.setcpv(pkg)
6254                                                 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
6255
6256                                         if pkg.cp != atom.cp:
6257                                                 # A cpv can be returned from dbapi.match() as an
6258                                                 # old-style virtual match even in cases when the
6259                                                 # package does not actually PROVIDE the virtual.
6260                                                 # Filter out any such false matches here.
6261                                                 if not atom_set.findAtomForPackage(pkg):
6262                                                         continue
6263
6264                                         myarg = None
6265                                         if root == self.target_root:
6266                                                 try:
6267                                                         # Ebuild USE must have been calculated prior
6268                                                         # to this point, in case atoms have USE deps.
6269                                                         myarg = self._iter_atoms_for_pkg(pkg).next()
6270                                                 except StopIteration:
6271                                                         pass
6272                                                 except portage.exception.InvalidDependString:
6273                                                         if not installed:
6274                                                                 # masked by corruption
6275                                                                 continue
6276                                         if not installed and myarg:
6277                                                 found_available_arg = True
6278
6279                                         if atom.use and not pkg.built:
6280                                                 use = pkg.use.enabled
6281                                                 if atom.use.enabled.difference(use):
6282                                                         continue
6283                                                 if atom.use.disabled.intersection(use):
6284                                                         continue
6285                                         if pkg.cp == atom_cp:
6286                                                 if highest_version is None:
6287                                                         highest_version = pkg
6288                                                 elif pkg > highest_version:
6289                                                         highest_version = pkg
6290                                         # At this point, we've found the highest visible
6291                                         # match from the current repo. Any lower versions
6292                                         # from this repo are ignored, so the loop
6293                                         # will always end with a break statement below
6294                                         # this point.
6295                                         if find_existing_node:
6296                                                 e_pkg = self._slot_pkg_map[root].get(pkg.slot_atom)
6297                                                 if not e_pkg:
6298                                                         break
6299                                                 if portage.dep.match_from_list(atom, [e_pkg]):
6300                                                         if highest_version and \
6301                                                                 e_pkg.cp == atom_cp and \
6302                                                                 e_pkg < highest_version and \
6303                                                                 e_pkg.slot_atom != highest_version.slot_atom:
6304                                                                 # There is a higher version available in a
6305                                                                 # different slot, so this existing node is
6306                                                                 # irrelevant.
6307                                                                 pass
6308                                                         else:
6309                                                                 matched_packages.append(e_pkg)
6310                                                                 existing_node = e_pkg
6311                                                 break
6312                                         # Compare built package to current config and
6313                                         # reject the built package if necessary.
6314                                         if built and not installed and \
6315                                                 ("--newuse" in self.myopts or \
6316                                                 "--reinstall" in self.myopts):
6317                                                 iuses = pkg.iuse.all
6318                                                 old_use = pkg.use.enabled
6319                                                 if myeb:
6320                                                         pkgsettings.setcpv(myeb)
6321                                                 else:
6322                                                         pkgsettings.setcpv(pkg)
6323                                                 now_use = pkgsettings["PORTAGE_USE"].split()
6324                                                 forced_flags = set()
6325                                                 forced_flags.update(pkgsettings.useforce)
6326                                                 forced_flags.update(pkgsettings.usemask)
6327                                                 cur_iuse = iuses
6328                                                 if myeb and not usepkgonly:
6329                                                         cur_iuse = myeb.iuse.all
6330                                                 if self._reinstall_for_flags(forced_flags,
6331                                                         old_use, iuses,
6332                                                         now_use, cur_iuse):
6333                                                         break
6334                                         # Compare current config to installed package
6335                                         # and do not reinstall if possible.
6336                                         if not installed and \
6337                                                 ("--newuse" in self.myopts or \
6338                                                 "--reinstall" in self.myopts) and \
6339                                                 cpv in vardb.match(atom):
6340                                                 pkgsettings.setcpv(pkg)
6341                                                 forced_flags = set()
6342                                                 forced_flags.update(pkgsettings.useforce)
6343                                                 forced_flags.update(pkgsettings.usemask)
6344                                                 old_use = vardb.aux_get(cpv, ["USE"])[0].split()
6345                                                 old_iuse = set(filter_iuse_defaults(
6346                                                         vardb.aux_get(cpv, ["IUSE"])[0].split()))
6347                                                 cur_use = pkgsettings["PORTAGE_USE"].split()
6348                                                 cur_iuse = pkg.iuse.all
6349                                                 reinstall_for_flags = \
6350                                                         self._reinstall_for_flags(
6351                                                         forced_flags, old_use, old_iuse,
6352                                                         cur_use, cur_iuse)
6353                                                 if reinstall_for_flags:
6354                                                         reinstall = True
6355                                         if not built:
6356                                                 myeb = pkg
6357                                         matched_packages.append(pkg)
6358                                         if reinstall_for_flags:
6359                                                 self._reinstall_nodes[pkg] = \
6360                                                         reinstall_for_flags
6361                                         break
6362
6363                 if not matched_packages:
6364                         return None, None
6365
6366                 if "--debug" in self.myopts:
6367                         for pkg in matched_packages:
6368                                 portage.writemsg("%s %s\n" % \
6369                                         ((pkg.type_name + ":").rjust(10), pkg.cpv), noiselevel=-1)
6370
6371                 # Filter out any old-style virtual matches if they are
6372                 # mixed with new-style virtual matches.
6373                 cp = portage.dep_getkey(atom)
6374                 if len(matched_packages) > 1 and \
6375                         "virtual" == portage.catsplit(cp)[0]:
6376                         for pkg in matched_packages:
6377                                 if pkg.cp != cp:
6378                                         continue
6379                                 # Got a new-style virtual, so filter
6380                                 # out any old-style virtuals.
6381                                 matched_packages = [pkg for pkg in matched_packages \
6382                                         if pkg.cp == cp]
6383                                 break
6384
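                     # If matches of different types remain, keep only those
                     # that carry the best version; ties between types are then
                     # resolved by the type preference order below.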
6385                 if len(matched_packages) > 1:
6386                         bestmatch = portage.best(
6387                                 [pkg.cpv for pkg in matched_packages])
6388                         matched_packages = [pkg for pkg in matched_packages \
6389                                 if portage.dep.cpvequal(pkg.cpv, bestmatch)]
6390
6391                 # ordered by type preference ("ebuild" type is the last resort)
6392                 return matched_packages[-1], existing_node
6393
6394         def _select_pkg_from_graph(self, root, atom, onlydeps=False):
6395                 """
6396                 Select packages that have already been added to the graph or
6397                 those that are installed and have not been scheduled for
6398                 replacement.
6399                 """
6400                 graph_db = self._graph_trees[root]["porttree"].dbapi
6401                 matches = graph_db.match_pkgs(atom)
6402                 if not matches:
6403                         return None, None
6404                 pkg = matches[-1] # highest match
6405                 in_graph = self._slot_pkg_map[root].get(pkg.slot_atom)
6406                 return pkg, in_graph
6407
6408         def _complete_graph(self):
6409                 """
6410                 Add any deep dependencies of required sets (args, system, world) that
6411                 have not been pulled into the graph yet. This ensures that the graph
6412                 is consistent such that initially satisfied deep dependencies are not
6413                 broken in the new graph. Initially unsatisfied dependencies are
6414                 irrelevant since we only want to avoid breaking dependencies that are
6415                 initially satisfied.
6416
6417                 Since this method can consume enough time to disturb users, it is
6418                 currently only enabled by the --complete-graph option.
6419                 """
6420                 if "--buildpkgonly" in self.myopts or \
6421                         "recurse" not in self.myparams:
6422                         return 1
6423
6424                 if "complete" not in self.myparams:
6425                         # Skip this to avoid consuming enough time to disturb users.
6426                         return 1
6427
6428                 # Put the depgraph into a mode that causes it to only
6429                 # select packages that have already been added to the
6430                 # graph or those that are installed and have not been
6431                 # scheduled for replacement. Also, toggle the "deep"
6432                 # parameter so that all dependencies are traversed and
6433                 # accounted for.
6434                 self._select_atoms = self._select_atoms_from_graph
6435                 self._select_package = self._select_pkg_from_graph
6436                 already_deep = "deep" in self.myparams
6437                 if not already_deep:
6438                         self.myparams.add("deep")
6439
6440                 for root in self.roots:
6441                         required_set_names = self._required_set_names.copy()
6442                         if root == self.target_root and \
6443                                 (already_deep or "empty" in self.myparams):
6444                                 required_set_names.difference_update(self._sets)
6445                         if not required_set_names and not self._ignored_deps:
6446                                 continue
6447                         root_config = self.roots[root]
6448                         setconfig = root_config.setconfig
6449                         args = []
6450                         # Reuse existing SetArg instances when available.
6451                         for arg in self.digraph.root_nodes():
6452                                 if not isinstance(arg, SetArg):
6453                                         continue
6454                                 if arg.root_config != root_config:
6455                                         continue
6456                                 if arg.name in required_set_names:
6457                                         args.append(arg)
6458                                         required_set_names.remove(arg.name)
6459                         # Create new SetArg instances only when necessary.
6460                         for s in required_set_names:
6461                                 expanded_set = InternalPackageSet(
6462                                         initial_atoms=setconfig.getSetAtoms(s))
6463                                 atom = SETPREFIX + s
6464                                 args.append(SetArg(arg=atom, set=expanded_set,
6465                                         root_config=root_config))
6466                         vardb = root_config.trees["vartree"].dbapi
6467                         for arg in args:
6468                                 for atom in arg.set:
6469                                         self._dep_stack.append(
6470                                                 Dependency(atom=atom, root=root, parent=arg))
6471                         if self._ignored_deps:
6472                                 self._dep_stack.extend(self._ignored_deps)
6473                                 self._ignored_deps = []
6474                         if not self._create_graph(allow_unsatisfied=True):
6475                                 return 0
6476                         # Check the unsatisfied deps to see if any initially satisfied deps
6477                         # will become unsatisfied due to an upgrade. Initially unsatisfied
6478                         # deps are irrelevant since we only want to avoid breaking deps
6479                         # that are initially satisfied.
6480                         while self._unsatisfied_deps:
6481                                 dep = self._unsatisfied_deps.pop()
6482                                 matches = vardb.match_pkgs(dep.atom)
6483                                 if not matches:
6484                                         self._initially_unsatisfied_deps.append(dep)
6485                                         continue
6486                                 # A scheduled installation broke a deep dependency.
6487                                 # Add the installed package to the graph so that it
6488                                 # will be appropriately reported as a slot collision
6489                                 # (possibly solvable via backtracking).
6490                                 pkg = matches[-1] # highest match
6491                                 if not self._add_pkg(pkg, dep):
6492                                         return 0
6493                                 if not self._create_graph(allow_unsatisfied=True):
6494                                         return 0
6495                 return 1
6496
6497         def _pkg(self, cpv, type_name, root_config, installed=False):
6498                 """
6499                 Get a package instance from the cache, or create a new
6500                 one if necessary. Raises PackageNotFound if the aux_get
6501                 call fails for some reason (package does not exist or is
6502                 corrupt).
6503                 """
6504                 operation = "merge"
6505                 if installed:
6506                         operation = "nomerge"
6507                 pkg = self._pkg_cache.get(
6508                         (type_name, root_config.root, cpv, operation))
6509                 if pkg is None:
6510                         tree_type = self.pkg_tree_map[type_name]
6511                         db = root_config.trees[tree_type].dbapi
6512                         db_keys = list(self._trees_orig[root_config.root][
6513                                 tree_type].dbapi._aux_cache_keys)
6514                         try:
6515                                 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
6516                         except KeyError:
6517                                 raise portage.exception.PackageNotFound(cpv)
6518                         pkg = Package(cpv=cpv, metadata=metadata,
6519                                 root_config=root_config, installed=installed)
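                             # For ebuilds, calculate the effective USE flags for this
                             # package from the current configuration.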
6520                         if type_name == "ebuild":
6521                                 settings = self.pkgsettings[root_config.root]
6522                                 settings.setcpv(pkg)
6523                                 pkg.metadata["USE"] = settings["PORTAGE_USE"]
6524                         self._pkg_cache[pkg] = pkg
6525                 return pkg
6526
6527         def validate_blockers(self):
6528                 """Remove any blockers from the digraph that do not match any of the
6529                 packages within the graph.  If necessary, create hard deps to ensure
6530                 correct merge order such that mutually blocking packages are never
6531                 installed simultaneously."""
6532
6533                 if "--buildpkgonly" in self.myopts or \
6534                         "--nodeps" in self.myopts:
6535                         return True
6536
6537                 #if "deep" in self.myparams:
6538                 if True:
6539                         # Pull in blockers from all installed packages that haven't already
6540                         # been pulled into the depgraph. This is currently done
6541                         # unconditionally, despite the performance penalty incurred by
6542                         # the additional dep_check calls that are required.
6543
6544                         dep_keys = ["DEPEND","RDEPEND","PDEPEND"]
6545                         for myroot in self.trees:
6546                                 vardb = self.trees[myroot]["vartree"].dbapi
6547                                 portdb = self.trees[myroot]["porttree"].dbapi
6548                                 pkgsettings = self.pkgsettings[myroot]
6549                                 final_db = self.mydbapi[myroot]
6550
6551                                 blocker_cache = BlockerCache(myroot, vardb)
6552                                 stale_cache = set(blocker_cache)
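                                     # Entries left in stale_cache after the loop below
                                     # belong to packages that are no longer installed and
                                     # are pruned from the blocker cache afterwards.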
6553                                 for pkg in vardb:
6554                                         cpv = pkg.cpv
6555                                         stale_cache.discard(cpv)
6556                                         pkg_in_graph = self.digraph.contains(pkg)
6557
6558                                         # Check for masked installed packages. Only warn about
6559                                         # packages that are in the graph in order to avoid warning
6560                                         # about those that will be automatically uninstalled during
6561                                         # the merge process or by --depclean.
6562                                         if pkg in final_db:
6563                                                 if pkg_in_graph and not visible(pkgsettings, pkg):
6564                                                         self._masked_installed.add(pkg)
6565
6566                                         blocker_atoms = None
6567                                         blockers = None
6568                                         if pkg_in_graph:
6569                                                 blockers = []
6570                                                 try:
6571                                                         blockers.extend(
6572                                                                 self._blocker_parents.child_nodes(pkg))
6573                                                 except KeyError:
6574                                                         pass
6575                                                 try:
6576                                                         blockers.extend(
6577                                                                 self._irrelevant_blockers.child_nodes(pkg))
6578                                                 except KeyError:
6579                                                         pass
6580                                         if blockers is not None:
6581                                                 blockers = set(str(blocker.atom) \
6582                                                         for blocker in blockers)
6583
6584                                         # If this node has any blockers, create a "nomerge"
6585                                         # node for it so that they can be enforced.
6586                                         self.spinner.update()
6587                                         blocker_data = blocker_cache.get(cpv)
6588                                         if blocker_data is not None and \
6589                                                 blocker_data.counter != long(pkg.metadata["COUNTER"]):
6590                                                 blocker_data = None
6591
6592                                         # If blocker data from the graph is available, use
6593                                         # it to validate the cache and update the cache if
6594                                         # it seems invalid.
6595                                         if blocker_data is not None and \
6596                                                 blockers is not None:
6597                                                 if not blockers.symmetric_difference(
6598                                                         blocker_data.atoms):
6599                                                         continue
6600                                                 blocker_data = None
6601
6602                                         if blocker_data is None and \
6603                                                 blockers is not None:
6604                                                 # Re-use the blockers from the graph.
6605                                                 blocker_atoms = sorted(blockers)
6606                                                 counter = long(pkg.metadata["COUNTER"])
6607                                                 blocker_data = \
6608                                                         blocker_cache.BlockerData(counter, blocker_atoms)
6609                                                 blocker_cache[pkg.cpv] = blocker_data
6610                                                 continue
6611
6612                                         if blocker_data:
6613                                                 blocker_atoms = blocker_data.atoms
6614                                         else:
6615                                                 # Use aux_get() to trigger FakeVartree global
6616                                                 # updates on *DEPEND when appropriate.
6617                                                 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
6618                                                 # It is crucial to pass in final_db here in order to
6619                                                 # optimize dep_check calls by eliminating atoms via
6620                                                 # dep_wordreduce and dep_eval calls.
6621                                                 try:
6622                                                         portage.dep._dep_check_strict = False
6623                                                         try:
6624                                                                 success, atoms = portage.dep_check(depstr,
6625                                                                         final_db, pkgsettings, myuse=pkg.use.enabled,
6626                                                                         trees=self._graph_trees, myroot=myroot)
6627                                                         except Exception, e:
6628                                                                 if isinstance(e, SystemExit):
6629                                                                         raise
6630                                                                 # This is helpful, for example, if a ValueError
6631                                                                 # is thrown from cpv_expand due to multiple
6632                                                                 # matches (this can happen if an atom lacks a
6633                                                                 # category).
6634                                                                 show_invalid_depstring_notice(
6635                                                                         pkg, depstr, str(e))
6636                                                                 del e
6637                                                                 raise
6638                                                 finally:
6639                                                         portage.dep._dep_check_strict = True
6640                                                 if not success:
6641                                                         replacement_pkg = final_db.match_pkgs(pkg.slot_atom)
6642                                                         if replacement_pkg and \
6643                                                                 replacement_pkg[0].operation == "merge":
6644                                                                 # This package is being replaced anyway, so
6645                                                                 # ignore invalid dependencies so as not to
6646                                                                 # annoy the user too much (otherwise they'd be
6647                                                                 # forced to manually unmerge it first).
6648                                                                 continue
6649                                                         show_invalid_depstring_notice(pkg, depstr, atoms)
6650                                                         return False
6651                                                 blocker_atoms = [myatom for myatom in atoms \
6652                                                         if myatom.startswith("!")]
6653                                                 blocker_atoms.sort()
6654                                                 counter = long(pkg.metadata["COUNTER"])
6655                                                 blocker_cache[cpv] = \
6656                                                         blocker_cache.BlockerData(counter, blocker_atoms)
6657                                         if blocker_atoms:
6658                                                 try:
6659                                                         for atom in blocker_atoms:
6660                                                                 blocker = Blocker(atom=portage.dep.Atom(atom),
6661                                                                         eapi=pkg.metadata["EAPI"], root=myroot)
6662                                                                 self._blocker_parents.add(blocker, pkg)
6663                                                 except portage.exception.InvalidAtom, e:
6664                                                         depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
6665                                                         show_invalid_depstring_notice(
6666                                                                 pkg, depstr, "Invalid Atom: %s" % (e,))
6667                                                         return False
6668                                 for cpv in stale_cache:
6669                                         del blocker_cache[cpv]
6670                                 blocker_cache.flush()
6671                                 del blocker_cache
6672
6673                 # Discard any "uninstall" tasks scheduled by previous calls
6674                 # to this method, since those tasks may not make sense given
6675                 # the current graph state.
6676                 previous_uninstall_tasks = self._blocker_uninstalls.leaf_nodes()
6677                 if previous_uninstall_tasks:
6678                         self._blocker_uninstalls = digraph()
6679                         self.digraph.difference_update(previous_uninstall_tasks)
6680
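                     # Match each blocker against both the installed (initial) and
                     # planned (final) packages, then decide whether the block is
                     # irrelevant, resolvable via an uninstall, or unresolvable.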
6681                 for blocker in self._blocker_parents.leaf_nodes():
6682                         self.spinner.update()
6683                         root_config = self.roots[blocker.root]
6684                         virtuals = root_config.settings.getvirtuals()
6685                         myroot = blocker.root
6686                         initial_db = self.trees[myroot]["vartree"].dbapi
6687                         final_db = self.mydbapi[myroot]
6688
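                             # A blocker on an old-style virtual with no new-style
                             # provider in the graph is expanded into one atom per
                             # provider package.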
6689                         provider_virtual = False
6690                         if blocker.cp in virtuals and \
6691                                 not self._have_new_virt(blocker.root, blocker.cp):
6692                                 provider_virtual = True
6693
6694                         if provider_virtual:
6695                                 atoms = []
6696                                 for provider_entry in virtuals[blocker.cp]:
6697                                         provider_cp = \
6698                                                 portage.dep_getkey(provider_entry)
6699                                         atoms.append(blocker.atom.replace(
6700                                                 blocker.cp, provider_cp))
6701                         else:
6702                                 atoms = [blocker.atom]
6703
6704                         blocked_initial = []
6705                         for atom in atoms:
6706                                 blocked_initial.extend(initial_db.match_pkgs(atom))
6707
6708                         blocked_final = []
6709                         for atom in atoms:
6710                                 blocked_final.extend(final_db.match_pkgs(atom))
6711
6712                         if not blocked_initial and not blocked_final:
6713                                 parent_pkgs = self._blocker_parents.parent_nodes(blocker)
6714                                 self._blocker_parents.remove(blocker)
6715                                 # Discard any parents that don't have any more blockers.
6716                                 for pkg in parent_pkgs:
6717                                         self._irrelevant_blockers.add(blocker, pkg)
6718                                         if not self._blocker_parents.child_nodes(pkg):
6719                                                 self._blocker_parents.remove(pkg)
6720                                 continue
6721                         for parent in self._blocker_parents.parent_nodes(blocker):
6722                                 unresolved_blocks = False
6723                                 depends_on_order = set()
6724                                 for pkg in blocked_initial:
6725                                         if pkg.slot_atom == parent.slot_atom:
6726                                                 # TODO: Support blocks within slots in cases where it
6727                                                 # might make sense.  For example, a new version might
6728                                                 # require that the old version be uninstalled at build
6729                                                 # time.
6730                                                 continue
6731                                         if parent.installed:
6732                                                 # Two currently installed packages conflict with
6733                                                 # each other. Ignore this case since the damage
6734                                                 # is already done and this would be likely to
6735                                                 # confuse users if displayed like a normal blocker.
6736                                                 continue
6737
6738                                         self._blocked_pkgs.add(pkg, blocker)
6739
6740                                         if parent.operation == "merge":
6741                                                 # Maybe the blocked package can be replaced or simply
6742                                                 # unmerged to resolve this block.
6743                                                 depends_on_order.add((pkg, parent))
6744                                                 continue
6745                                         # None of the above blocker resolution techniques apply,
6746                                         # so apparently this one is unresolvable.
6747                                         unresolved_blocks = True
6748                                 for pkg in blocked_final:
6749                                         if pkg.slot_atom == parent.slot_atom:
6750                                                 # TODO: Support blocks within slots.
6751                                                 continue
6752                                         if parent.operation == "nomerge" and \
6753                                                 pkg.operation == "nomerge":
6754                                                 # This blocker will be handled the next time that a
6755                                                 # merge of either package is triggered.
6756                                                 continue
6757
6758                                         self._blocked_pkgs.add(pkg, blocker)
6759
6760                                         # Maybe the blocking package can be
6761                                         # unmerged to resolve this block.
6762                                         if parent.operation == "merge" and pkg.installed:
6763                                                 depends_on_order.add((pkg, parent))
6764                                                 continue
6765                                         elif parent.operation == "nomerge":
6766                                                 depends_on_order.add((parent, pkg))
6767                                                 continue
6768                                         # None of the above blocker resolution techniques apply,
6769                                         # so apparently this one is unresolvable.
6770                                         unresolved_blocks = True
6771
6772                                 # Make sure we don't unmerge any packages that have been pulled
6773                                 # into the graph.
6774                                 if not unresolved_blocks and depends_on_order:
6775                                         for inst_pkg, inst_task in depends_on_order:
6776                                                 if self.digraph.contains(inst_pkg) and \
6777                                                         self.digraph.parent_nodes(inst_pkg):
6778                                                         unresolved_blocks = True
6779                                                         break
6780
6781                                 if not unresolved_blocks and depends_on_order:
6782                                         for inst_pkg, inst_task in depends_on_order:
6783                                                 uninst_task = Package(built=inst_pkg.built,
6784                                                         cpv=inst_pkg.cpv, installed=inst_pkg.installed,
6785                                                         metadata=inst_pkg.metadata,
6786                                                         operation="uninstall",
6787                                                         root_config=inst_pkg.root_config,
6788                                                         type_name=inst_pkg.type_name)
6789                                                 self._pkg_cache[uninst_task] = uninst_task
6790                                                 # Enforce correct merge order with a hard dep.
6791                                                 self.digraph.addnode(uninst_task, inst_task,
6792                                                         priority=BlockerDepPriority.instance)
6793                                                 # Count references to this blocker so that it can be
6794                                                 # invalidated after nodes referencing it have been
6795                                                 # merged.
6796                                                 self._blocker_uninstalls.addnode(uninst_task, blocker)
6797                                 if not unresolved_blocks and not depends_on_order:
6798                                         self._irrelevant_blockers.add(blocker, parent)
6799                                         self._blocker_parents.remove_edge(blocker, parent)
6800                                         if not self._blocker_parents.parent_nodes(blocker):
6801                                                 self._blocker_parents.remove(blocker)
6802                                         if not self._blocker_parents.child_nodes(parent):
6803                                                 self._blocker_parents.remove(parent)
6804                                 if unresolved_blocks:
6805                                         self._unsolvable_blockers.add(blocker, parent)
6806
6807                 return True
6808
6809         def _accept_blocker_conflicts(self):
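                     # Blocker conflicts are acceptable when the requested operation
                     # either won't merge anything to the live filesystem or skips
                     # dependency handling entirely (--nodeps).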
6810                 acceptable = False
6811                 for x in ("--buildpkgonly", "--fetchonly",
6812                         "--fetch-all-uri", "--nodeps"):
6813                         if x in self.myopts:
6814                                 acceptable = True
6815                                 break
6816                 return acceptable
6817
6818         def _merge_order_bias(self, mygraph):
6819                 """
6820                 For optimal leaf node selection, promote deep system runtime deps and
6821                 order nodes from highest to lowest overall reference count.
6822                 """
6823
6824                 node_info = {}
6825                 for node in mygraph.order:
6826                         node_info[node] = len(mygraph.parent_nodes(node))
6827                 deep_system_deps = _find_deep_system_runtime_deps(mygraph)
6828
6829                 def cmp_merge_preference(node1, node2):
6830
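                             # Ordering: uninstall tasks sort last, deep system runtime
                             # deps sort first, and remaining nodes are ordered from the
                             # highest to the lowest parent (reference) count.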
6831                         if node1.operation == 'uninstall':
6832                                 if node2.operation == 'uninstall':
6833                                         return 0
6834                                 return 1
6835
6836                         if node2.operation == 'uninstall':
6837                                 if node1.operation == 'uninstall':
6838                                         return 0
6839                                 return -1
6840
6841                         node1_sys = node1 in deep_system_deps
6842                         node2_sys = node2 in deep_system_deps
6843                         if node1_sys != node2_sys:
6844                                 if node1_sys:
6845                                         return -1
6846                                 return 1
6847
6848                         return node_info[node2] - node_info[node1]
6849
6850                 mygraph.order.sort(key=cmp_sort_key(cmp_merge_preference))
6851
6852         def altlist(self, reversed=False):
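                     # Return the serialized merge list, computing and caching it on
                     # the first call. With reversed=True the list is returned in
                     # reverse order.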
6853
6854                 while self._serialized_tasks_cache is None:
6855                         self._resolve_conflicts()
6856                         try:
6857                                 self._serialized_tasks_cache, self._scheduler_graph = \
6858                                         self._serialize_tasks()
6859                         except self._serialize_tasks_retry:
6860                                 pass
6861
6862                 retlist = self._serialized_tasks_cache[:]
6863                 if reversed:
6864                         retlist.reverse()
6865                 return retlist
6866
6867         def schedulerGraph(self):
6868                 """
6869                 The scheduler graph is identical to the normal one except that
6870                 uninstall edges are reversed in specific cases that require
6871                 conflicting packages to be temporarily installed simultaneously.
6872                 This is intended for use by the Scheduler in its parallelization
6873                 logic. It ensures that temporary simultaneous installation of
6874                 conflicting packages is avoided when appropriate (especially for
6875                 !!atom blockers), but allowed in specific cases that require it.
6876
6877                 Note that this method calls break_refs() which alters the state of
6878                 internal Package instances such that this depgraph instance should
6879                 not be used to perform any more calculations.
6880                 """
6881                 if self._scheduler_graph is None:
6882                         self.altlist()
6883                 self.break_refs(self._scheduler_graph.order)
6884                 return self._scheduler_graph
6885
6886         def break_refs(self, nodes):
6887                 """
6888                 Take a mergelist like that returned from self.altlist() and
6889                 break any references that lead back to the depgraph. This is
6890                 useful if you want to hold references to packages without
6891                 also holding the depgraph on the heap.
6892                 """
6893                 for node in nodes:
6894                         if hasattr(node, "root_config"):
6895                                 # The FakeVartree references the _package_cache which
6896                                 # references the depgraph. So that Package instances don't
6897                                 # hold the depgraph and FakeVartree on the heap, replace
6898                                 # the RootConfig that references the FakeVartree with the
6899                                 # original RootConfig instance which references the actual
6900                                 # vartree.
6901                                 node.root_config = \
6902                                         self._trees_orig[node.root_config.root]["root_config"]
6903
6904         def _resolve_conflicts(self):
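                     # Complete the graph, validate blockers, and process any
                     # recorded slot conflicts; failures here indicate internal
                     # errors.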
6905                 if not self._complete_graph():
6906                         raise self._unknown_internal_error()
6907
6908                 if not self.validate_blockers():
6909                         raise self._unknown_internal_error()
6910
6911                 if self._slot_collision_info:
6912                         self._process_slot_conflicts()
6913
6914         def _serialize_tasks(self):
6915
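                     # Flatten the dependency graph into an ordered list of merge and
                     # uninstall tasks, scheduling uninstalls as late as possible and
                     # resolving circular dependencies where possible.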
6916                 if "--debug" in self.myopts:
6917                         writemsg("\ndigraph:\n\n", noiselevel=-1)
6918                         self.digraph.debug_print()
6919                         writemsg("\n", noiselevel=-1)
6920
6921                 scheduler_graph = self.digraph.copy()
6922                 mygraph = self.digraph.copy()
6923                 # Prune "nomerge" root nodes if nothing depends on them, since
6924                 # otherwise they slow down merge order calculation. Don't remove
6925                 # non-root nodes since they help optimize merge order in some cases
6926                 # such as revdep-rebuild.
6927                 removed_nodes = set()
6928                 while True:
6929                         for node in mygraph.root_nodes():
6930                                 if not isinstance(node, Package) or \
6931                                         node.installed or node.onlydeps:
6932                                         removed_nodes.add(node)
6933                         if removed_nodes:
6934                                 self.spinner.update()
6935                                 mygraph.difference_update(removed_nodes)
6936                         if not removed_nodes:
6937                                 break
6938                         removed_nodes.clear()
6939                 self._merge_order_bias(mygraph)
6940                 def cmp_circular_bias(n1, n2):
6941                         """
6942                         RDEPEND is stronger than PDEPEND and this function
6943                         measures such a strength bias within a circular
6944                         dependency relationship.
6945                         """
6946                         n1_n2_medium = n2 in mygraph.child_nodes(n1,
6947                                 ignore_priority=priority_range.ignore_medium_soft)
6948                         n2_n1_medium = n1 in mygraph.child_nodes(n2,
6949                                 ignore_priority=priority_range.ignore_medium_soft)
6950                         if n1_n2_medium == n2_n1_medium:
6951                                 return 0
6952                         elif n1_n2_medium:
6953                                 return 1
6954                         return -1
6955                 myblocker_uninstalls = self._blocker_uninstalls.copy()
6956                 retlist = []
6957                 # Contains uninstall tasks that have been scheduled to
6958                 # occur after overlapping blockers have been installed.
6959                 scheduled_uninstalls = set()
6960                 # Contains any Uninstall tasks that have been ignored
6961                 # in order to avoid the circular deps code path. These
6962                 # correspond to blocker conflicts that could not be
6963                 # resolved.
6964                 ignored_uninstall_tasks = set()
6965                 have_uninstall_task = False
6966                 complete = "complete" in self.myparams
6967                 asap_nodes = []
6968
6969                 def get_nodes(**kwargs):
6970                         """
6971                         Returns leaf nodes excluding Uninstall instances
6972                         since those should be executed as late as possible.
6973                         """
6974                         return [node for node in mygraph.leaf_nodes(**kwargs) \
6975                                 if isinstance(node, Package) and \
6976                                         (node.operation != "uninstall" or \
6977                                         node in scheduled_uninstalls)]
6978
6979                 # sys-apps/portage needs special treatment if ROOT="/"
6980                 running_root = self._running_root.root
6981                 from portage.const import PORTAGE_PACKAGE_ATOM
6982                 runtime_deps = InternalPackageSet(
6983                         initial_atoms=[PORTAGE_PACKAGE_ATOM])
6984                 running_portage = self.trees[running_root]["vartree"].dbapi.match_pkgs(
6985                         PORTAGE_PACKAGE_ATOM)
6986                 replacement_portage = self.mydbapi[running_root].match_pkgs(
6987                         PORTAGE_PACKAGE_ATOM)
6988
6989                 if running_portage:
6990                         running_portage = running_portage[0]
6991                 else:
6992                         running_portage = None
6993
6994                 if replacement_portage:
6995                         replacement_portage = replacement_portage[0]
6996                 else:
6997                         replacement_portage = None
6998
6999                 if replacement_portage == running_portage:
7000                         replacement_portage = None
7001
7002                 if replacement_portage is not None:
7003                         # update from running_portage to replacement_portage asap
7004                         asap_nodes.append(replacement_portage)
7005
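                     # Record the running portage instance's runtime deps so that the
                     # uninstall scheduling below can avoid removing the only installed
                     # package that satisfies one of them.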
7006                 if running_portage is not None:
7007                         try:
7008                                 portage_rdepend = self._select_atoms_highest_available(
7009                                         running_root, running_portage.metadata["RDEPEND"],
7010                                         myuse=running_portage.use.enabled,
7011                                         parent=running_portage, strict=False)
7012                         except portage.exception.InvalidDependString, e:
7013                                 portage.writemsg("!!! Invalid RDEPEND in " + \
7014                                         "'%svar/db/pkg/%s/RDEPEND': %s\n" % \
7015                                         (running_root, running_portage.cpv, e), noiselevel=-1)
7016                                 del e
7017                                 portage_rdepend = []
7018                         runtime_deps.update(atom for atom in portage_rdepend \
7019                                 if not atom.startswith("!"))
7020
7021                 def gather_deps(ignore_priority, mergeable_nodes,
7022                         selected_nodes, node):
7023                         """
7024                         Recursively gather a group of nodes that RDEPEND on
7025                         eachother. This ensures that they are merged as a group
7026                         each other. This ensures that they are merged as a group
7027                         """
7028                         if node in selected_nodes:
7029                                 return True
7030                         if node not in mergeable_nodes:
7031                                 return False
7032                         if node == replacement_portage and \
7033                                 mygraph.child_nodes(node,
7034                                 ignore_priority=priority_range.ignore_medium_soft):
7035                                 # Make sure that portage always has all of its
7036                                 # RDEPENDs installed first.
7037                                 return False
7038                         selected_nodes.add(node)
7039                         for child in mygraph.child_nodes(node,
7040                                 ignore_priority=ignore_priority):
7041                                 if not gather_deps(ignore_priority,
7042                                         mergeable_nodes, selected_nodes, child):
7043                                         return False
7044                         return True
7045
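                     # These helpers extend the active priority range so that edges
                     # which enforce blocker-driven uninstall order are also ignored
                     # when searching for mergeable leaf nodes.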
7046                 def ignore_uninst_or_med(priority):
7047                         if priority is BlockerDepPriority.instance:
7048                                 return True
7049                         return priority_range.ignore_medium(priority)
7050
7051                 def ignore_uninst_or_med_soft(priority):
7052                         if priority is BlockerDepPriority.instance:
7053                                 return True
7054                         return priority_range.ignore_medium_soft(priority)
7055
7056                 tree_mode = "--tree" in self.myopts
7057                 # Tracks whether or not the current iteration should prefer asap_nodes
7058                 # if available.  This is set to False when the previous iteration
7059                 # failed to select any nodes.  It is reset whenever nodes are
7060                 # successfully selected.
7061                 prefer_asap = True
7062
7063                 # Controls whether or not the current iteration should drop edges that
7064                 # are "satisfied" by installed packages, in order to solve circular
7065                 # dependencies. The deep runtime dependencies of installed packages are
7066                 # not checked in this case (bug #199856), so it must be avoided
7067                 # whenever possible.
7068                 drop_satisfied = False
7069
7070                 # State of variables for successive iterations that loosen the
7071                 # criteria for node selection.
7072                 #
7073                 # iteration   prefer_asap   drop_satisfied
7074                 # 1           True          False
7075                 # 2           False         False
7076                 # 3           False         True
7077                 #
7078                 # If no nodes are selected on the last iteration, it is due to
7079                 # unresolved blockers or circular dependencies.
7080
7081                 while not mygraph.empty():
7082                         self.spinner.update()
7083                         selected_nodes = None
7084                         ignore_priority = None
7085                         if drop_satisfied or (prefer_asap and asap_nodes):
7086                                 priority_range = DepPrioritySatisfiedRange
7087                         else:
7088                                 priority_range = DepPriorityNormalRange
7089                         if prefer_asap and asap_nodes:
7090                                 # ASAP nodes are merged before their soft deps. Go ahead and
7091                                 # select root nodes here if necessary, since it's typical for
7092                                 # the parent to have been removed from the graph already.
7093                                 asap_nodes = [node for node in asap_nodes \
7094                                         if mygraph.contains(node)]
7095                                 for node in asap_nodes:
7096                                         if not mygraph.child_nodes(node,
7097                                                 ignore_priority=priority_range.ignore_soft):
7098                                                 selected_nodes = [node]
7099                                                 asap_nodes.remove(node)
7100                                                 break
7101                         if not selected_nodes and \
7102                                 not (prefer_asap and asap_nodes):
7103                                 for i in xrange(priority_range.NONE,
7104                                         priority_range.MEDIUM_SOFT + 1):
7105                                         ignore_priority = priority_range.ignore_priority[i]
7106                                         nodes = get_nodes(ignore_priority=ignore_priority)
7107                                         if nodes:
7108                                                 # If there is a mix of uninstall nodes with other
7109                                                 # types, save the uninstall nodes for later since
7110                                                 # sometimes a merge node will render an uninstall
7111                                                 # node unnecessary (due to occupying the same slot),
7112                                                 # and we want to avoid executing a separate uninstall
7113                                                 # task in that case.
7114                                                 if len(nodes) > 1:
7115                                                         good_uninstalls = []
7116                                                         with_some_uninstalls_excluded = []
7117                                                         for node in nodes:
7118                                                                 if node.operation == "uninstall":
7119                                                                         slot_node = self.mydbapi[node.root
7120                                                                                 ].match_pkgs(node.slot_atom)
7121                                                                         if slot_node and \
7122                                                                                 slot_node[0].operation == "merge":
7123                                                                                 continue
7124                                                                         good_uninstalls.append(node)
7125                                                                 with_some_uninstalls_excluded.append(node)
7126                                                         if good_uninstalls:
7127                                                                 nodes = good_uninstalls
7128                                                         elif with_some_uninstalls_excluded:
7129                                                                 nodes = with_some_uninstalls_excluded
7130                                                         else:
7131                                                                 nodes = nodes
7132
7133                                                 if ignore_priority is None and not tree_mode:
7134                                                         # Greedily pop all of these nodes since no
7135                                                         # relationship has been ignored. This optimization
7136                                                         # destroys --tree output, so it's disabled in tree
7137                                                         # mode.
7138                                                         selected_nodes = nodes
7139                                                 else:
7140                                                         # For optimal merge order:
7141                                                         #  * Only pop one node.
7142                                                         #  * Removing a root node (node without a parent)
7143                                                         #    will not produce a leaf node, so avoid it.
7144                                                         #  * It's normal for a selected uninstall to be a
7145                                                         #    root node, so don't check them for parents.
7146                                                         for node in nodes:
7147                                                                 if node.operation == "uninstall" or \
7148                                                                         mygraph.parent_nodes(node):
7149                                                                         selected_nodes = [node]
7150                                                                         break
7151
7152                                                 if selected_nodes:
7153                                                         break
7154
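                             # No single leaf node qualified, so fall back to merging a
                             # group of nodes that runtime-depend on each other (this
                             # handles simple circular RDEPEND relationships).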
7155                         if not selected_nodes:
7156                                 nodes = get_nodes(ignore_priority=priority_range.ignore_medium)
7157                                 if nodes:
7158                                         mergeable_nodes = set(nodes)
7159                                         if prefer_asap and asap_nodes:
7160                                                 nodes = asap_nodes
7161                                         for i in xrange(priority_range.SOFT,
7162                                                 priority_range.MEDIUM_SOFT + 1):
7163                                                 ignore_priority = priority_range.ignore_priority[i]
7164                                                 for node in nodes:
7165                                                         if not mygraph.parent_nodes(node):
7166                                                                 continue
7167                                                         selected_nodes = set()
7168                                                         if gather_deps(ignore_priority,
7169                                                                 mergeable_nodes, selected_nodes, node):
7170                                                                 break
7171                                                         else:
7172                                                                 selected_nodes = None
7173                                                 if selected_nodes:
7174                                                         break
7175
7176                                         if prefer_asap and asap_nodes and not selected_nodes:
7177                                                 # We failed to find any asap nodes to merge, so ignore
7178                                                 # them for the next iteration.
7179                                                 prefer_asap = False
7180                                                 continue
7181
7182                         if selected_nodes and ignore_priority is not None:
7183                                 # Try to merge ignored medium_soft deps as soon as possible
7184                                 # if they're not satisfied by installed packages.
7185                                 for node in selected_nodes:
7186                                         children = set(mygraph.child_nodes(node))
7187                                         soft = children.difference(
7188                                                 mygraph.child_nodes(node,
7189                                                 ignore_priority=DepPrioritySatisfiedRange.ignore_soft))
7190                                         medium_soft = children.difference(
7191                                                 mygraph.child_nodes(node,
7192                                                         ignore_priority = \
7193                                                         DepPrioritySatisfiedRange.ignore_medium_soft))
7194                                         medium_soft.difference_update(soft)
7195                                         for child in medium_soft:
7196                                                 if child in selected_nodes:
7197                                                         continue
7198                                                 if child in asap_nodes:
7199                                                         continue
7200                                                 asap_nodes.append(child)
7201
7202                         if selected_nodes and len(selected_nodes) > 1:
7203                                 if not isinstance(selected_nodes, list):
7204                                         selected_nodes = list(selected_nodes)
7205                                 selected_nodes.sort(key=cmp_sort_key(cmp_circular_bias))
7206
7207                         if not selected_nodes and not myblocker_uninstalls.is_empty():
7208                                 # An Uninstall task needs to be executed in order to
7209                                 # avoid conflict if possible.
7210
7211                                 if drop_satisfied:
7212                                         priority_range = DepPrioritySatisfiedRange
7213                                 else:
7214                                         priority_range = DepPriorityNormalRange
7215
7216                                 mergeable_nodes = get_nodes(
7217                                         ignore_priority=ignore_uninst_or_med)
7218
7219                                 min_parent_deps = None
7220                                 uninst_task = None
7221                                 for task in myblocker_uninstalls.leaf_nodes():
7222                                         # Do some sanity checks so that system or world packages
7223                                         # don't get uninstalled inappropriately here (only really
7224                                         # necessary when --complete-graph has not been enabled).
7225
7226                                         if task in ignored_uninstall_tasks:
7227                                                 continue
7228
7229                                         if task in scheduled_uninstalls:
7230                                                 # It's been scheduled but it hasn't
7231                                                 # been executed yet due to dependence
7232                                                 # on installation of blocking packages.
7233                                                 continue
7234
7235                                         root_config = self.roots[task.root]
7236                                         inst_pkg = self._pkg_cache[
7237                                                 ("installed", task.root, task.cpv, "nomerge")]
7238
7239                                         if self.digraph.contains(inst_pkg):
7240                                                 continue
7241
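                                             # EAPI 0/1 blockers carry no explicit overlap
                                             # information, so handle them heuristically below;
                                             # newer blockers can explicitly forbid temporary
                                             # overlap.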
7242                                         forbid_overlap = False
7243                                         heuristic_overlap = False
7244                                         for blocker in myblocker_uninstalls.parent_nodes(task):
7245                                                 if blocker.eapi in ("0", "1"):
7246                                                         heuristic_overlap = True
7247                                                 elif blocker.atom.blocker.overlap.forbid:
7248                                                         forbid_overlap = True
7249                                                         break
7250                                         if forbid_overlap and running_root == task.root:
7251                                                 continue
7252
7253                                         if heuristic_overlap and running_root == task.root:
7254                                                 # Never uninstall sys-apps/portage or its essential
7255                                                 # dependencies, except through replacement.
7256                                                 try:
7257                                                         runtime_dep_atoms = \
7258                                                                 list(runtime_deps.iterAtomsForPackage(task))
7259                                                 except portage.exception.InvalidDependString, e:
7260                                                         portage.writemsg("!!! Invalid PROVIDE in " + \
7261                                                                 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
7262                                                                 (task.root, task.cpv, e), noiselevel=-1)
7263                                                         del e
7264                                                         continue
7265
7266                                                 # Don't uninstall a runtime dep if it appears
7267                                                 # to be the only suitable one installed.
7268                                                 skip = False
7269                                                 vardb = root_config.trees["vartree"].dbapi
7270                                                 for atom in runtime_dep_atoms:
7271                                                         other_version = None
7272                                                         for pkg in vardb.match_pkgs(atom):
7273                                                                 if pkg.cpv == task.cpv and \
7274                                                                         pkg.metadata["COUNTER"] == \
7275                                                                         task.metadata["COUNTER"]:
7276                                                                         continue
7277                                                                 other_version = pkg
7278                                                                 break
7279                                                         if other_version is None:
7280                                                                 skip = True
7281                                                                 break
7282                                                 if skip:
7283                                                         continue
7284
7285                                                 # For packages in the system set, don't take
7286                                                 # any chances. If the conflict can't be resolved
7287                                                 # by a normal replacement operation then abort.
7288                                                 skip = False
7289                                                 try:
7290                                                         for atom in root_config.sets[
7291                                                                 "system"].iterAtomsForPackage(task):
7292                                                                 skip = True
7293                                                                 break
7294                                                 except portage.exception.InvalidDependString, e:
7295                                                         portage.writemsg("!!! Invalid PROVIDE in " + \
7296                                                                 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
7297                                                                 (task.root, task.cpv, e), noiselevel=-1)
7298                                                         del e
7299                                                         skip = True
7300                                                 if skip:
7301                                                         continue
7302
7303                                         # Note that the world check isn't always
7304                                         # necessary since self._complete_graph() will
7305                                         # add all packages from the system and world sets to the
7306                                         # graph. This just allows unresolved conflicts to be
7307                                         # detected as early as possible, which makes it possible
7308                                         # to avoid calling self._complete_graph() when it is
7309                                         # unnecessary due to blockers triggering an abort.
7310                                         if not complete:
7311                                                 # For packages in the world set, go ahead and uninstall
7312                                                 # when necessary, as long as the atom will be satisfied
7313                                                 # in the final state.
7314                                                 graph_db = self.mydbapi[task.root]
7315                                                 skip = False
7316                                                 try:
7317                                                         for atom in root_config.sets[
7318                                                                 "world"].iterAtomsForPackage(task):
7319                                                                 satisfied = False
7320                                                                 for pkg in graph_db.match_pkgs(atom):
7321                                                                         if pkg == inst_pkg:
7322                                                                                 continue
7323                                                                         satisfied = True
7324                                                                         break
7325                                                                 if not satisfied:
7326                                                                         skip = True
7327                                                                         self._blocked_world_pkgs[inst_pkg] = atom
7328                                                                         break
7329                                                 except portage.exception.InvalidDependString, e:
7330                                                         portage.writemsg("!!! Invalid PROVIDE in " + \
7331                                                                 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
7332                                                                 (task.root, task.cpv, e), noiselevel=-1)
7333                                                         del e
7334                                                         skip = True
7335                                                 if skip:
7336                                                         continue
7337
7338                                         # Check the deps of parent nodes to ensure that
7339                                         # the chosen task produces a leaf node. Maybe
7340                                         # this can be optimized some more to make the
7341                                         # best possible choice, but the current algorithm
7342                                         # is simple and should be near optimal for most
7343                                         # common cases.
7344                                         mergeable_parent = False
7345                                         parent_deps = set()
7346                                         for parent in mygraph.parent_nodes(task):
7347                                                 parent_deps.update(mygraph.child_nodes(parent,
7348                                                         ignore_priority=priority_range.ignore_medium_soft))
7349                                                 if parent in mergeable_nodes and \
7350                                                         gather_deps(ignore_uninst_or_med_soft,
7351                                                         mergeable_nodes, set(), parent):
7352                                                         mergeable_parent = True
7353
7354                                         if not mergeable_parent:
7355                                                 continue
7356
7357                                         parent_deps.remove(task)
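                                             # Prefer the uninstall whose parent nodes have the fewest
                                             # remaining dependencies, so the blocking packages can be
                                             # merged (and the uninstall executed) as soon as possible.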
7358                                         if min_parent_deps is None or \
7359                                                 len(parent_deps) < min_parent_deps:
7360                                                 min_parent_deps = len(parent_deps)
7361                                                 uninst_task = task
7362
7363                                 if uninst_task is not None:
7364                                         # The uninstall is performed only after blocking
7365                                         # packages have been merged on top of it. File
7366                                         # collisions between blocking packages are detected
7367                                         # and removed from the list of files to be uninstalled.
7368                                         scheduled_uninstalls.add(uninst_task)
7369                                         parent_nodes = mygraph.parent_nodes(uninst_task)
7370
7371                                         # Reverse the parent -> uninstall edges since we want
7372                                         # to do the uninstall after blocking packages have
7373                                         # been merged on top of it.
7374                                         mygraph.remove(uninst_task)
7375                                         for blocked_pkg in parent_nodes:
7376                                                 mygraph.add(blocked_pkg, uninst_task,
7377                                                         priority=BlockerDepPriority.instance)
7378                                                 scheduler_graph.remove_edge(uninst_task, blocked_pkg)
7379                                                 scheduler_graph.add(blocked_pkg, uninst_task,
7380                                                         priority=BlockerDepPriority.instance)
7381
7382                                         # Reset the state variables for leaf node selection and
7383                                         # continue trying to select leaf nodes.
7384                                         prefer_asap = True
7385                                         drop_satisfied = False
7386                                         continue
7387
7388                         if not selected_nodes:
7389                                 # Only select root nodes as a last resort. This case should
7390                                 # only trigger when the graph is nearly empty and the only
7391                                 # remaining nodes are isolated (no parents or children). Since
7392                                 # the nodes must be isolated, ignore_priority is not needed.
7393                                 selected_nodes = get_nodes()
7394
7395                         if not selected_nodes and not drop_satisfied:
7396                                 drop_satisfied = True
7397                                 continue
7398
7399                         if not selected_nodes and not myblocker_uninstalls.is_empty():
7400                                 # If possible, drop an uninstall task here in order to avoid
7401                                 # the circular deps code path. The corresponding blocker will
7402                                 # still be counted as an unresolved conflict.
7403                                 uninst_task = None
7404                                 for node in myblocker_uninstalls.leaf_nodes():
7405                                         try:
7406                                                 mygraph.remove(node)
7407                                         except KeyError:
7408                                                 pass
7409                                         else:
7410                                                 uninst_task = node
7411                                                 ignored_uninstall_tasks.add(node)
7412                                                 break
7413
7414                                 if uninst_task is not None:
7415                                         # Reset the state variables for leaf node selection and
7416                                         # continue trying to select leaf nodes.
7417                                         prefer_asap = True
7418                                         drop_satisfied = False
7419                                         continue
7420
7421                         if not selected_nodes:
7422                                 self._circular_deps_for_display = mygraph
7423                                 raise self._unknown_internal_error()
7424
7425                         # At this point, we've succeeded in selecting one or more nodes, so
7426                         # reset state variables for leaf node selection.
7427                         prefer_asap = True
7428                         drop_satisfied = False
7429
7430                         mygraph.difference_update(selected_nodes)
7431
7432                         for node in selected_nodes:
7433                                 if isinstance(node, Package) and \
7434                                         node.operation == "nomerge":
7435                                         continue
7436
7437                                 # Handle interactions between blockers
7438                                 # and uninstallation tasks.
7439                                 solved_blockers = set()
7440                                 uninst_task = None
7441                                 if isinstance(node, Package) and \
7442                                         "uninstall" == node.operation:
7443                                         have_uninstall_task = True
7444                                         uninst_task = node
7445                                 else:
7446                                         vardb = self.trees[node.root]["vartree"].dbapi
7447                                         previous_cpv = vardb.match(node.slot_atom)
7448                                         if previous_cpv:
7449                                                 # The package will be replaced by this one, so remove
7450                                                 # the corresponding Uninstall task if necessary.
7451                                                 previous_cpv = previous_cpv[0]
7452                                                 uninst_task = \
7453                                                         ("installed", node.root, previous_cpv, "uninstall")
7454                                                 try:
7455                                                         mygraph.remove(uninst_task)
7456                                                 except KeyError:
7457                                                         pass
7458
7459                                 if uninst_task is not None and \
7460                                         uninst_task not in ignored_uninstall_tasks and \
7461                                         myblocker_uninstalls.contains(uninst_task):
7462                                         blocker_nodes = myblocker_uninstalls.parent_nodes(uninst_task)
7463                                         myblocker_uninstalls.remove(uninst_task)
7464                                         # Discard any blockers that this Uninstall solves.
7465                                         for blocker in blocker_nodes:
7466                                                 if not myblocker_uninstalls.child_nodes(blocker):
7467                                                         myblocker_uninstalls.remove(blocker)
7468                                                         solved_blockers.add(blocker)
7469
7470                                 retlist.append(node)
7471
7472                                 if (isinstance(node, Package) and \
7473                                         "uninstall" == node.operation) or \
7474                                         (uninst_task is not None and \
7475                                         uninst_task in scheduled_uninstalls):
7476                                         # Include satisfied blockers in the merge list
7477                                         # since the user might be interested and also
7478                                         # it serves as an indicator that blocking packages
7479                                         # will be temporarily installed simultaneously.
7480                                         for blocker in solved_blockers:
7481                                                 retlist.append(Blocker(atom=blocker.atom,
7482                                                         root=blocker.root, eapi=blocker.eapi,
7483                                                         satisfied=True))
7484
7485                 unsolvable_blockers = set(self._unsolvable_blockers.leaf_nodes())
7486                 for node in myblocker_uninstalls.root_nodes():
7487                         unsolvable_blockers.add(node)
7488
7489                 for blocker in unsolvable_blockers:
7490                         retlist.append(blocker)
7491
7492                 # If any Uninstall tasks need to be executed in order
7493                 # to avoid a conflict, complete the graph with any
7494                 # dependencies that may have been initially
7495                 # neglected (to ensure that unsafe Uninstall tasks
7496                 # are properly identified and blocked from execution).
7497                 if have_uninstall_task and \
7498                         not complete and \
7499                         not unsolvable_blockers:
7500                         self.myparams.add("complete")
7501                         raise self._serialize_tasks_retry("")
7502
7503                 if unsolvable_blockers and \
7504                         not self._accept_blocker_conflicts():
7505                         self._unsatisfied_blockers_for_display = unsolvable_blockers
7506                         self._serialized_tasks_cache = retlist[:]
7507                         self._scheduler_graph = scheduler_graph
7508                         raise self._unknown_internal_error()
7509
7510                 if self._slot_collision_info and \
7511                         not self._accept_blocker_conflicts():
7512                         self._serialized_tasks_cache = retlist[:]
7513                         self._scheduler_graph = scheduler_graph
7514                         raise self._unknown_internal_error()
7515
7516                 return retlist, scheduler_graph
7517
7518         def _show_circular_deps(self, mygraph):
7519                 # No leaf nodes are available, so we have a circular
7520                 # dependency panic situation.  Reduce the noise level to a
7521                 # minimum via repeated elimination of root nodes since they
7522                 # have no parents and thus can not be part of a cycle.
7523                 while True:
7524                         root_nodes = mygraph.root_nodes(
7525                                 ignore_priority=DepPrioritySatisfiedRange.ignore_medium_soft)
7526                         if not root_nodes:
7527                                 break
7528                         mygraph.difference_update(root_nodes)
7529                 # Display the USE flags that are enabled on nodes that are part
7530                 # of dependency cycles in case that helps the user decide to
7531                 # disable some of them.
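                     # Compute a display order by repeatedly removing leaf nodes
                     # (falling back to insertion order when only cycles remain),
                     # then reverse it so the list reads like normal --tree output.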
7532                 display_order = []
7533                 tempgraph = mygraph.copy()
7534                 while not tempgraph.empty():
7535                         nodes = tempgraph.leaf_nodes()
7536                         if not nodes:
7537                                 node = tempgraph.order[0]
7538                         else:
7539                                 node = nodes[0]
7540                         display_order.append(node)
7541                         tempgraph.remove(node)
7542                 display_order.reverse()
7543                 self.myopts.pop("--quiet", None)
7544                 self.myopts.pop("--verbose", None)
7545                 self.myopts["--tree"] = True
7546                 portage.writemsg("\n\n", noiselevel=-1)
7547                 self.display(display_order)
7548                 prefix = colorize("BAD", " * ")
7549                 portage.writemsg("\n", noiselevel=-1)
7550                 portage.writemsg(prefix + "Error: circular dependencies:\n",
7551                         noiselevel=-1)
7552                 portage.writemsg("\n", noiselevel=-1)
7553                 mygraph.debug_print()
7554                 portage.writemsg("\n", noiselevel=-1)
7555                 portage.writemsg(prefix + "Note that circular dependencies " + \
7556                         "can often be avoided by temporarily\n", noiselevel=-1)
7557                 portage.writemsg(prefix + "disabling USE flags that trigger " + \
7558                         "optional dependencies.\n", noiselevel=-1)
7559
7560         def _show_merge_list(self):
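                     # Only display the cached task list if it has not already been
                     # shown, in either forward or reversed (--tree) order.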
7561                 if self._serialized_tasks_cache is not None and \
7562                         not (self._displayed_list and \
7563                         (self._displayed_list == self._serialized_tasks_cache or \
7564                         self._displayed_list == \
7565                                 list(reversed(self._serialized_tasks_cache)))):
7566                         display_list = self._serialized_tasks_cache[:]
7567                         if "--tree" in self.myopts:
7568                                 display_list.reverse()
7569                         self.display(display_list)
7570
7571         def _show_unsatisfied_blockers(self, blockers):
7572                 self._show_merge_list()
7573                 msg = "Error: The above package list contains " + \
7574                         "packages which cannot be installed " + \
7575                         "at the same time on the same system."
7576                 prefix = colorize("BAD", " * ")
7577                 from textwrap import wrap
7578                 portage.writemsg("\n", noiselevel=-1)
7579                 for line in wrap(msg, 70):
7580                         portage.writemsg(prefix + line + "\n", noiselevel=-1)
7581
7582                 # Display the conflicting packages along with the packages
7583                 # that pulled them in. This is helpful for troubleshooting
7584                 # cases in which blockers don't solve automatically and
7585                 # the reasons are not apparent from the normal merge list
7586                 # display.
7587
7588                 conflict_pkgs = {}
7589                 for blocker in blockers:
7590                         for pkg in chain(self._blocked_pkgs.child_nodes(blocker), \
7591                                 self._blocker_parents.parent_nodes(blocker)):
7592                                 parent_atoms = self._parent_atoms.get(pkg)
7593                                 if not parent_atoms:
7594                                         atom = self._blocked_world_pkgs.get(pkg)
7595                                         if atom is not None:
7596                                                 parent_atoms = set([("@world", atom)])
7597                                 if parent_atoms:
7598                                         conflict_pkgs[pkg] = parent_atoms
7599
7600                 if conflict_pkgs:
7601                         # Reduce noise by pruning packages that are only
7602                         # pulled in by other conflict packages.
7603                         pruned_pkgs = set()
7604                         for pkg, parent_atoms in conflict_pkgs.iteritems():
7605                                 relevant_parent = False
7606                                 for parent, atom in parent_atoms:
7607                                         if parent not in conflict_pkgs:
7608                                                 relevant_parent = True
7609                                                 break
7610                                 if not relevant_parent:
7611                                         pruned_pkgs.add(pkg)
7612                         for pkg in pruned_pkgs:
7613                                 del conflict_pkgs[pkg]
7614
7615                 if conflict_pkgs:
7616                         msg = []
7617                         msg.append("\n")
7618                         indent = "  "
7619                         # Max number of parents shown, to avoid flooding the display.
7620                         max_parents = 3
7621                         for pkg, parent_atoms in conflict_pkgs.iteritems():
7622
7623                                 pruned_list = set()
7624
7625                                 # Prefer packages that are not directly involved in a conflict.
7626                                 for parent_atom in parent_atoms:
7627                                         if len(pruned_list) >= max_parents:
7628                                                 break
7629                                         parent, atom = parent_atom
7630                                         if parent not in conflict_pkgs:
7631                                                 pruned_list.add(parent_atom)
7632
7633                                 for parent_atom in parent_atoms:
7634                                         if len(pruned_list) >= max_parents:
7635                                                 break
7636                                         pruned_list.add(parent_atom)
7637
7638                                 omitted_parents = len(parent_atoms) - len(pruned_list)
7639                                 msg.append(indent + "%s pulled in by\n" % pkg)
7640
7641                                 for parent_atom in pruned_list:
7642                                         parent, atom = parent_atom
7643                                         msg.append(2*indent)
7644                                         if isinstance(parent,
7645                                                 (PackageArg, AtomArg)):
7646                                                 # For PackageArg and AtomArg types, it's
7647                                                 # redundant to display the atom attribute.
7648                                                 msg.append(str(parent))
7649                                         else:
7650                                                 # Display the specific atom from SetArg or
7651                                                 # Package types.
7652                                                 msg.append("%s required by %s" % (atom, parent))
7653                                         msg.append("\n")
7654
7655                                 if omitted_parents:
7656                                         msg.append(2*indent)
7657                                         msg.append("(and %d more)\n" % omitted_parents)
7658
7659                                 msg.append("\n")
7660
7661                         sys.stderr.write("".join(msg))
7662                         sys.stderr.flush()
7663
7664                 if "--quiet" not in self.myopts:
7665                         show_blocker_docs_link()
7666
7667         def display(self, mylist, favorites=[], verbosity=None):
7668
7669                 # This is used to prevent display_problems() from
7670                 # redundantly displaying this exact same merge list
7671                 # again via _show_merge_list().
7672                 self._displayed_list = mylist
7673
7674                 if verbosity is None:
7675                         verbosity = ("--quiet" in self.myopts and 1 or \
7676                                 "--verbose" in self.myopts and 3 or 2)
7677                 favorites_set = InternalPackageSet(favorites)
7678                 oneshot = "--oneshot" in self.myopts or \
7679                         "--onlydeps" in self.myopts
7680                 columns = "--columns" in self.myopts
7681                 changelogs=[]
7682                 p=[]
7683                 blockers = []
7684
7685                 counters = PackageCounters()
7686
7687                 if verbosity == 1 and "--verbose" not in self.myopts:
7688                         def create_use_string(*args):
7689                                 return ""
7690                 else:
7691                         def create_use_string(name, cur_iuse, iuse_forced, cur_use,
7692                                 old_iuse, old_use,
7693                                 is_new, reinst_flags,
7694                                 all_flags=(verbosity == 3 or "--quiet" in self.myopts),
7695                                 alphabetical=("--alphabetical" in self.myopts)):
7696                                 enabled = []
7697                                 if alphabetical:
7698                                         disabled = enabled
7699                                         removed = enabled
7700                                 else:
7701                                         disabled = []
7702                                         removed = []
7703                                 cur_iuse = set(cur_iuse)
7704                                 enabled_flags = cur_iuse.intersection(cur_use)
7705                                 removed_iuse = set(old_iuse).difference(cur_iuse)
7706                                 any_iuse = cur_iuse.union(old_iuse)
7707                                 any_iuse = list(any_iuse)
7708                                 any_iuse.sort()
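                                             # Build the colorized flag list. Roughly: a "*" suffix marks
                                             # a flag whose enabled state changed relative to the installed
                                             # version, "%" marks a flag that is new to (or removed from)
                                             # IUSE, parentheses mark forced or removed flags, and a "-"
                                             # prefix marks a disabled flag.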
7709                                 for flag in any_iuse:
7710                                         flag_str = None
7711                                         isEnabled = False
7712                                         reinst_flag = reinst_flags and flag in reinst_flags
7713                                         if flag in enabled_flags:
7714                                                 isEnabled = True
7715                                                 if is_new or flag in old_use and \
7716                                                         (all_flags or reinst_flag):
7717                                                         flag_str = red(flag)
7718                                                 elif flag not in old_iuse:
7719                                                         flag_str = yellow(flag) + "%*"
7720                                                 elif flag not in old_use:
7721                                                         flag_str = green(flag) + "*"
7722                                         elif flag in removed_iuse:
7723                                                 if all_flags or reinst_flag:
7724                                                         flag_str = yellow("-" + flag) + "%"
7725                                                         if flag in old_use:
7726                                                                 flag_str += "*"
7727                                                         flag_str = "(" + flag_str + ")"
7728                                                         removed.append(flag_str)
7729                                                 continue
7730                                         else:
7731                                                 if is_new or flag in old_iuse and \
7732                                                         flag not in old_use and \
7733                                                         (all_flags or reinst_flag):
7734                                                         flag_str = blue("-" + flag)
7735                                                 elif flag not in old_iuse:
7736                                                         flag_str = yellow("-" + flag)
7737                                                         if flag not in iuse_forced:
7738                                                                 flag_str += "%"
7739                                                 elif flag in old_use:
7740                                                         flag_str = green("-" + flag) + "*"
7741                                         if flag_str:
7742                                                 if flag in iuse_forced:
7743                                                         flag_str = "(" + flag_str + ")"
7744                                                 if isEnabled:
7745                                                         enabled.append(flag_str)
7746                                                 else:
7747                                                         disabled.append(flag_str)
7748
7749                                 if alphabetical:
7750                                         ret = " ".join(enabled)
7751                                 else:
7752                                         ret = " ".join(enabled + disabled + removed)
7753                                 if ret:
7754                                         ret = '%s="%s" ' % (name, ret)
7755                                 return ret
7756
7757                 repo_display = RepoDisplay(self.roots)
7758
7759                 tree_nodes = []
7760                 display_list = []
7761                 mygraph = self.digraph.copy()
7762
7763                 # If there are any Uninstall instances, add the corresponding
7764                 # blockers to the digraph (useful for --tree display).
7765
7766                 executed_uninstalls = set(node for node in mylist \
7767                         if isinstance(node, Package) and node.operation == "unmerge")
7768
7769                 for uninstall in self._blocker_uninstalls.leaf_nodes():
7770                         uninstall_parents = \
7771                                 self._blocker_uninstalls.parent_nodes(uninstall)
7772                         if not uninstall_parents:
7773                                 continue
7774
7775                         # Remove the corresponding "nomerge" node and substitute
7776                         # the Uninstall node.
7777                         inst_pkg = self._pkg_cache[
7778                                 ("installed", uninstall.root, uninstall.cpv, "nomerge")]
7779                         try:
7780                                 mygraph.remove(inst_pkg)
7781                         except KeyError:
7782                                 pass
7783
7784                         try:
7785                                 inst_pkg_blockers = self._blocker_parents.child_nodes(inst_pkg)
7786                         except KeyError:
7787                                 inst_pkg_blockers = []
7788
7789                         # Break the Package -> Uninstall edges.
7790                         mygraph.remove(uninstall)
7791
7792                         # Resolution of a package's blockers
7793                         # depends on its own uninstallation.
7794                         for blocker in inst_pkg_blockers:
7795                                 mygraph.add(uninstall, blocker)
7796
7797                         # Expand Package -> Uninstall edges into
7798                         # Package -> Blocker -> Uninstall edges.
7799                         for blocker in uninstall_parents:
7800                                 mygraph.add(uninstall, blocker)
7801                                 for parent in self._blocker_parents.parent_nodes(blocker):
7802                                         if parent != inst_pkg:
7803                                                 mygraph.add(blocker, parent)
7804
7805                         # If the uninstall task did not need to be executed because
7806                         # of an upgrade, display Blocker -> Upgrade edges since the
7807                         # corresponding Blocker -> Uninstall edges will not be shown.
7808                         upgrade_node = \
7809                                 self._slot_pkg_map[uninstall.root].get(uninstall.slot_atom)
7810                         if upgrade_node is not None and \
7811                                 uninstall not in executed_uninstalls:
7812                                 for blocker in uninstall_parents:
7813                                         mygraph.add(upgrade_node, blocker)
7814
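                     # Build the display list. With --tree, a depth is computed for
                     # each entry so that it can be indented under the parent that
                     # pulled it in; unsatisfied blockers are collected and appended
                     # at depth zero.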
7815                 unsatisfied_blockers = []
7816                 i = 0
7817                 depth = 0
7818                 shown_edges = set()
7819                 for x in mylist:
7820                         if isinstance(x, Blocker) and not x.satisfied:
7821                                 unsatisfied_blockers.append(x)
7822                                 continue
7823                         graph_key = x
7824                         if "--tree" in self.myopts:
7825                                 depth = len(tree_nodes)
7826                                 while depth and graph_key not in \
7827                                         mygraph.child_nodes(tree_nodes[depth-1]):
7828                                                 depth -= 1
7829                                 if depth:
7830                                         tree_nodes = tree_nodes[:depth]
7831                                         tree_nodes.append(graph_key)
7832                                         display_list.append((x, depth, True))
7833                                         shown_edges.add((graph_key, tree_nodes[depth-1]))
7834                                 else:
7835                                         traversed_nodes = set() # prevent endless cycles
7836                                         traversed_nodes.add(graph_key)
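                                                     # Walk up the dependency graph from this entry, choosing
                                                     # one not-yet-shown parent per level (avoiding direct
                                                     # cycles when possible), so the --tree output shows a
                                                     # chain explaining why the package is being pulled in.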
7837                                         def add_parents(current_node, ordered):
7838                                                 parent_nodes = None
7839                                                 # Do not traverse to parents if this node is
7840                                                 # an argument or a direct member of a set that has
7841                                                 # been specified as an argument (system or world).
7842                                                 if current_node not in self._set_nodes:
7843                                                         parent_nodes = mygraph.parent_nodes(current_node)
7844                                                 if parent_nodes:
7845                                                         child_nodes = set(mygraph.child_nodes(current_node))
7846                                                         selected_parent = None
7847                                                         # First, try to avoid a direct cycle.
7848                                                         for node in parent_nodes:
7849                                                                 if not isinstance(node, (Blocker, Package)):
7850                                                                         continue
7851                                                                 if node not in traversed_nodes and \
7852                                                                         node not in child_nodes:
7853                                                                         edge = (current_node, node)
7854                                                                         if edge in shown_edges:
7855                                                                                 continue
7856                                                                         selected_parent = node
7857                                                                         break
7858                                                         if not selected_parent:
7859                                                                 # A direct cycle is unavoidable.
7860                                                                 for node in parent_nodes:
7861                                                                         if not isinstance(node, (Blocker, Package)):
7862                                                                                 continue
7863                                                                         if node not in traversed_nodes:
7864                                                                                 edge = (current_node, node)
7865                                                                                 if edge in shown_edges:
7866                                                                                         continue
7867                                                                                 selected_parent = node
7868                                                                                 break
7869                                                         if selected_parent:
7870                                                                 shown_edges.add((current_node, selected_parent))
7871                                                                 traversed_nodes.add(selected_parent)
7872                                                                 add_parents(selected_parent, False)
7873                                                 display_list.append((current_node,
7874                                                         len(tree_nodes), ordered))
7875                                                 tree_nodes.append(current_node)
7876                                         tree_nodes = []
7877                                         add_parents(graph_key, True)
7878                         else:
7879                                 display_list.append((x, depth, True))
7880                 mylist = display_list
7881                 for x in unsatisfied_blockers:
7882                         mylist.append((x, 0, True))
7883
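                     # Walk the display list in reverse and drop "nomerge" ancestor
                     # rows that do not lead down to an actual merge at a deeper
                     # level, along with consecutive duplicates introduced while
                     # the tree was being filled in.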
7884                 last_merge_depth = 0
7885                 for i in xrange(len(mylist)-1,-1,-1):
7886                         graph_key, depth, ordered = mylist[i]
7887                         if not ordered and depth == 0 and i > 0 \
7888                                 and graph_key == mylist[i-1][0] and \
7889                                 mylist[i-1][1] == 0:
7890                                 # An ordered node got a consecutive duplicate when the tree was
7891                                 # being filled in.
7892                                 del mylist[i]
7893                                 continue
7894                         if ordered and graph_key[-1] != "nomerge":
7895                                 last_merge_depth = depth
7896                                 continue
7897                         if depth >= last_merge_depth or \
7898                                 i < len(mylist) - 1 and \
7899                                 depth >= mylist[i+1][1]:
7900                                         del mylist[i]
7901
7902                 from portage import flatten
7903                 from portage.dep import use_reduce, paren_reduce
7904                 # files to fetch list - avoids counting the same file twice
7905                 # in size display (verbose mode)
7906                 myfetchlist=[]
7907
7908                 # Use this set to detect when all the "repoadd" strings are "[0]"
7909                 # and disable the entire repo display in this case.
7910                 repoadd_set = set()
7911
7912                 for mylist_index in xrange(len(mylist)):
7913                         x, depth, ordered = mylist[mylist_index]
7914                         pkg_type = x[0]
7915                         myroot = x[1]
7916                         pkg_key = x[2]
7917                         portdb = self.trees[myroot]["porttree"].dbapi
7918                         bindb  = self.trees[myroot]["bintree"].dbapi
7919                         vardb = self.trees[myroot]["vartree"].dbapi
7920                         vartree = self.trees[myroot]["vartree"]
7921                         pkgsettings = self.pkgsettings[myroot]
7922
7923                         fetch=" "
7924                         indent = " " * depth
7925
7926                         if isinstance(x, Blocker):
7927                                 if x.satisfied:
7928                                         blocker_style = "PKG_BLOCKER_SATISFIED"
7929                                         addl = "%s  %s  " % (colorize(blocker_style, "b"), fetch)
7930                                 else:
7931                                         blocker_style = "PKG_BLOCKER"
7932                                         addl = "%s  %s  " % (colorize(blocker_style, "B"), fetch)
7933                                 if ordered:
7934                                         counters.blocks += 1
7935                                         if x.satisfied:
7936                                                 counters.blocks_satisfied += 1
7937                                 resolved = portage.key_expand(
7938                                         str(x.atom).lstrip("!"), mydb=vardb, settings=pkgsettings)
7939                                 if "--columns" in self.myopts and "--quiet" in self.myopts:
7940                                         addl += " " + colorize(blocker_style, resolved)
7941                                 else:
7942                                         addl = "[%s %s] %s%s" % \
7943                                                 (colorize(blocker_style, "blocks"),
7944                                                 addl, indent, colorize(blocker_style, resolved))
7945                                 block_parents = self._blocker_parents.parent_nodes(x)
7946                                 block_parents = set([pnode[2] for pnode in block_parents])
7947                                 block_parents = ", ".join(block_parents)
7948                                 if resolved!=x[2]:
7949                                         addl += colorize(blocker_style,
7950                                                 " (\"%s\" is blocking %s)") % \
7951                                                 (str(x.atom).lstrip("!"), block_parents)
7952                                 else:
7953                                         addl += colorize(blocker_style,
7954                                                 " (is blocking %s)") % block_parents
7955                                 if isinstance(x, Blocker) and x.satisfied:
7956                                         if columns:
7957                                                 continue
7958                                         p.append(addl)
7959                                 else:
7960                                         blockers.append(addl)
7961                         else:
7962                                 pkg_status = x[3]
7963                                 pkg_merge = ordered and pkg_status == "merge"
7964                                 if not pkg_merge and pkg_status == "merge":
7965                                         pkg_status = "nomerge"
7966                                 built = pkg_type != "ebuild"
7967                                 installed = pkg_type == "installed"
7968                                 pkg = x
7969                                 metadata = pkg.metadata
7970                                 ebuild_path = None
7971                                 repo_name = metadata["repository"]
7972                                 if pkg_type == "ebuild":
7973                                         ebuild_path = portdb.findname(pkg_key)
7974                                         if not ebuild_path: # shouldn't happen
7975                                                 raise portage.exception.PackageNotFound(pkg_key)
7976                                         repo_path_real = os.path.dirname(os.path.dirname(
7977                                                 os.path.dirname(ebuild_path)))
7978                                 else:
7979                                         repo_path_real = portdb.getRepositoryPath(repo_name)
7980                                 pkg_use = list(pkg.use.enabled)
7981                                 try:
7982                                         restrict = flatten(use_reduce(paren_reduce(
7983                                                 pkg.metadata["RESTRICT"]), uselist=pkg_use))
7984                                 except portage.exception.InvalidDependString, e:
7985                                         if not pkg.installed:
7986                                                 show_invalid_depstring_notice(x,
7987                                                         pkg.metadata["RESTRICT"], str(e))
7988                                                 del e
7989                                                 return 1
7990                                         restrict = []
7991                                 if "ebuild" == pkg_type and x[3] != "nomerge" and \
7992                                         "fetch" in restrict:
7993                                         fetch = red("F")
7994                                         if ordered:
7995                                                 counters.restrict_fetch += 1
7996                                         if portdb.fetch_check(pkg_key, pkg_use):
7997                                                 fetch = green("f")
7998                                                 if ordered:
7999                                                         counters.restrict_fetch_satisfied += 1
8000
8001                                 # We need to use "--emptytree" testing here rather than "empty" param testing because "empty"
8002                                 # param is used for -u, where you still *do* want to see when something is being upgraded.
8003                                 myoldbest = []
8004                                 myinslotlist = None
8005                                 installed_versions = vardb.match(portage.cpv_getkey(pkg_key))
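                                     # Classify the operation for the status column. Roughly: "R"
                                     # means an already-installed version, "U"/"UD" an upgrade or
                                     # downgrade within an existing slot, "NS" the first install
                                     # into a new slot, and "N" a completely new package.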
8006                                 if vardb.cpv_exists(pkg_key):
8007                                         addl="  "+yellow("R")+fetch+"  "
8008                                         if ordered:
8009                                                 if pkg_merge:
8010                                                         counters.reinst += 1
8011                                                 elif pkg_status == "uninstall":
8012                                                         counters.uninst += 1
8013                                 # filter out old-style virtual matches
8014                                 elif installed_versions and \
8015                                         portage.cpv_getkey(installed_versions[0]) == \
8016                                         portage.cpv_getkey(pkg_key):
8017                                         myinslotlist = vardb.match(pkg.slot_atom)
8018                                         # If this is the first install of a new-style virtual, we
8019                                         # need to filter out old-style virtual matches.
8020                                         if myinslotlist and \
8021                                                 portage.cpv_getkey(myinslotlist[0]) != \
8022                                                 portage.cpv_getkey(pkg_key):
8023                                                 myinslotlist = None
8024                                         if myinslotlist:
8025                                                 myoldbest = myinslotlist[:]
8026                                                 addl = "   " + fetch
8027                                                 if not portage.dep.cpvequal(pkg_key,
8028                                                         portage.best([pkg_key] + myoldbest)):
8029                                                         # Downgrade in slot
8030                                                         addl += turquoise("U")+blue("D")
8031                                                         if ordered:
8032                                                                 counters.downgrades += 1
8033                                                 else:
8034                                                         # Update in slot
8035                                                         addl += turquoise("U") + " "
8036                                                         if ordered:
8037                                                                 counters.upgrades += 1
8038                                         else:
8039                                                 # New slot, mark it new.
8040                                                 addl = " " + green("NS") + fetch + "  "
8041                                                 myoldbest = vardb.match(portage.cpv_getkey(pkg_key))
8042                                                 if ordered:
8043                                                         counters.newslot += 1
8044
8045                                         if "--changelog" in self.myopts:
8046                                                 inst_matches = vardb.match(pkg.slot_atom)
8047                                                 if inst_matches:
8048                                                         changelogs.extend(self.calc_changelog(
8049                                                                 portdb.findname(pkg_key),
8050                                                                 inst_matches[0], pkg_key))
8051                                 else:
8052                                         addl = " " + green("N") + " " + fetch + "  "
8053                                         if ordered:
8054                                                 counters.new += 1
8055
8056                                 verboseadd = ""
8057                                 repoadd = None
8058
8059                                 if True:
8060                                         # USE flag display
8061                                         forced_flags = set()
8062                                         pkgsettings.setcpv(pkg) # for package.use.{mask,force}
8063                                         forced_flags.update(pkgsettings.useforce)
8064                                         forced_flags.update(pkgsettings.usemask)
8065
8066                                         cur_use = [flag for flag in pkg.use.enabled \
8067                                                 if flag in pkg.iuse.all]
8068                                         cur_iuse = sorted(pkg.iuse.all)
8069
8070                                         if myoldbest and myinslotlist:
8071                                                 previous_cpv = myoldbest[0]
8072                                         else:
8073                                                 previous_cpv = pkg.cpv
8074                                         if vardb.cpv_exists(previous_cpv):
8075                                                 old_iuse, old_use = vardb.aux_get(
8076                                                                 previous_cpv, ["IUSE", "USE"])
8077                                                 old_iuse = list(set(
8078                                                         filter_iuse_defaults(old_iuse.split())))
8079                                                 old_iuse.sort()
8080                                                 old_use = old_use.split()
8081                                                 is_new = False
8082                                         else:
8083                                                 old_iuse = []
8084                                                 old_use = []
8085                                                 is_new = True
8086
8087                                         old_use = [flag for flag in old_use if flag in old_iuse]
8088
8089                                         use_expand = pkgsettings["USE_EXPAND"].lower().split()
8090                                         use_expand.sort()
8091                                         use_expand.reverse()
8092                                         use_expand_hidden = \
8093                                                 pkgsettings["USE_EXPAND_HIDDEN"].lower().split()
8094
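                                             # Split a flat list of USE flags into a dict keyed by the
                                             # lowercased USE_EXPAND variable that owns each flag, with the
                                             # remaining flags under "USE". For example, with "linguas" in
                                             # USE_EXPAND, ["linguas_en", "doc"] maps to
                                             # {"linguas": ["en"], "USE": ["doc"]}. Hidden expand variables
                                             # are optionally stripped and forced flags optionally returned.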
8095                                         def map_to_use_expand(myvals, forcedFlags=False,
8096                                                 removeHidden=True):
8097                                                 ret = {}
8098                                                 forced = {}
8099                                                 for exp in use_expand:
8100                                                         ret[exp] = []
8101                                                         forced[exp] = set()
8102                                                         for val in myvals[:]:
8103                                                                 if val.startswith(exp.lower()+"_"):
8104                                                                         if val in forced_flags:
8105                                                                                 forced[exp].add(val[len(exp)+1:])
8106                                                                         ret[exp].append(val[len(exp)+1:])
8107                                                                         myvals.remove(val)
8108                                                 ret["USE"] = myvals
8109                                                 forced["USE"] = [val for val in myvals \
8110                                                         if val in forced_flags]
8111                                                 if removeHidden:
8112                                                         for exp in use_expand_hidden:
8113                                                                 ret.pop(exp, None)
8114                                                 if forcedFlags:
8115                                                         return ret, forced
8116                                                 return ret
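                                             # A hedged illustration of map_to_use_expand() (flag and
                                             # variable names here are made up for the example): assuming
                                             # use_expand contains "linguas", no flags are forced and
                                             # nothing is hidden,
                                             #   map_to_use_expand(["linguas_en", "ssl"])
                                             # returns {"linguas": ["en"], "USE": ["ssl"]}; with
                                             # forcedFlags=True a parallel dict of forced flags per
                                             # variable is returned as well.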
8117
8118                                         # Prevent USE_EXPAND_HIDDEN flags from being hidden if they
8119                                         # are the only thing that triggered reinstallation.
8120                                         reinst_flags_map = {}
8121                                         reinstall_for_flags = self._reinstall_nodes.get(pkg)
8122                                         reinst_expand_map = None
8123                                         if reinstall_for_flags:
8124                                                 reinst_flags_map = map_to_use_expand(
8125                                                         list(reinstall_for_flags), removeHidden=False)
8126                                                 for k in list(reinst_flags_map):
8127                                                         if not reinst_flags_map[k]:
8128                                                                 del reinst_flags_map[k]
8129                                                 if not reinst_flags_map.get("USE"):
8130                                                         reinst_expand_map = reinst_flags_map.copy()
8131                                                         reinst_expand_map.pop("USE", None)
8132                                         if reinst_expand_map and \
8133                                                 not set(reinst_expand_map).difference(
8134                                                 use_expand_hidden):
8135                                                 use_expand_hidden = \
8136                                                         set(use_expand_hidden).difference(
8137                                                         reinst_expand_map)
8138
8139                                         cur_iuse_map, iuse_forced = \
8140                                                 map_to_use_expand(cur_iuse, forcedFlags=True)
8141                                         cur_use_map = map_to_use_expand(cur_use)
8142                                         old_iuse_map = map_to_use_expand(old_iuse)
8143                                         old_use_map = map_to_use_expand(old_use)
8144
8145                                         use_expand.sort()
8146                                         use_expand.insert(0, "USE")
8147
8148                                         for key in use_expand:
8149                                                 if key in use_expand_hidden:
8150                                                         continue
8151                                                 verboseadd += create_use_string(key.upper(),
8152                                                         cur_iuse_map[key], iuse_forced[key],
8153                                                         cur_use_map[key], old_iuse_map[key],
8154                                                         old_use_map[key], is_new,
8155                                                         reinst_flags_map.get(key))
8156
8157                                 if verbosity == 3:
8158                                         # size verbose
8159                                         mysize=0
8160                                         if pkg_type == "ebuild" and pkg_merge:
8161                                                 try:
8162                                                         myfilesdict = portdb.getfetchsizes(pkg_key,
8163                                                                 useflags=pkg_use, debug=self.edebug)
8164                                                 except portage.exception.InvalidDependString, e:
8165                                                         src_uri = portdb.aux_get(pkg_key, ["SRC_URI"])[0]
8166                                                         show_invalid_depstring_notice(x, src_uri, str(e))
8167                                                         del e
8168                                                         return 1
8169                                                 if myfilesdict is None:
8170                                                         myfilesdict="[empty/missing/bad digest]"
8171                                                 else:
8172                                                         for myfetchfile in myfilesdict:
8173                                                                 if myfetchfile not in myfetchlist:
8174                                                                         mysize+=myfilesdict[myfetchfile]
8175                                                                         myfetchlist.append(myfetchfile)
8176                                                         if ordered:
8177                                                                 counters.totalsize += mysize
8178                                                 verboseadd += format_size(mysize)
8179
8180                                         # overlay verbose
8181                                         # assign index for a previous version in the same slot
8182                                         has_previous = False
8183                                         repo_name_prev = None
8184                                         slot_atom = "%s:%s" % (portage.dep_getkey(pkg_key),
8185                                                 metadata["SLOT"])
8186                                         slot_matches = vardb.match(slot_atom)
8187                                         if slot_matches:
8188                                                 has_previous = True
8189                                                 repo_name_prev = vardb.aux_get(slot_matches[0],
8190                                                         ["repository"])[0]
8191
8192                                         # now use the data to generate output
8193                                         if pkg.installed or not has_previous:
8194                                                 repoadd = repo_display.repoStr(repo_path_real)
8195                                         else:
8196                                                 repo_path_prev = None
8197                                                 if repo_name_prev:
8198                                                         repo_path_prev = portdb.getRepositoryPath(
8199                                                                 repo_name_prev)
8200                                                 if repo_path_prev == repo_path_real:
8201                                                         repoadd = repo_display.repoStr(repo_path_real)
8202                                                 else:
8203                                                         repoadd = "%s=>%s" % (
8204                                                                 repo_display.repoStr(repo_path_prev),
8205                                                                 repo_display.repoStr(repo_path_real))
8206                                         if repoadd:
8207                                                 repoadd_set.add(repoadd)
8208
8209                                 xs = [portage.cpv_getkey(pkg_key)] + \
8210                                         list(portage.catpkgsplit(pkg_key)[2:])
8211                                 if xs[2] == "r0":
8212                                         xs[2] = ""
8213                                 else:
8214                                         xs[2] = "-" + xs[2]
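                                     # Illustrative example (hypothetical cpv): for a pkg_key of
                                     # "sys-apps/portage-2.1.6-r7" the lines above yield
                                     # xs == ["sys-apps/portage", "2.1.6", "-r7"], while an "-r0"
                                     # revision is reduced to an empty string.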
8215
8216                                 mywidth = 130
8217                                 if "COLUMNWIDTH" in self.settings:
8218                                         try:
8219                                                 mywidth = int(self.settings["COLUMNWIDTH"])
8220                                         except ValueError, e:
8221                                                 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
8222                                                 portage.writemsg(
8223                                                         "!!! Unable to parse COLUMNWIDTH='%s'\n" % \
8224                                                         self.settings["COLUMNWIDTH"], noiselevel=-1)
8225                                                 del e
8226                                 oldlp = mywidth - 30
8227                                 newlp = oldlp - 30
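                                     # With the default width of 130 this gives oldlp == 100 and
                                     # newlp == 70, the padding targets used for column alignment
                                     # further below.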
8228
8229                                 # Convert myoldbest from a list to a string.
8230                                 if not myoldbest:
8231                                         myoldbest = ""
8232                                 else:
8233                                         for pos, key in enumerate(myoldbest):
8234                                                 key = portage.catpkgsplit(key)[2] + \
8235                                                         "-" + portage.catpkgsplit(key)[3]
8236                                                 if key[-3:] == "-r0":
8237                                                         key = key[:-3]
8238                                                 myoldbest[pos] = key
8239                                         myoldbest = blue("["+", ".join(myoldbest)+"]")
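                                     # For example (hypothetical versions), an installed list of
                                     # ["app-misc/foo-1.0-r1", "app-misc/foo-1.1"] is rendered as
                                     # the colorized string "[1.0-r1, 1.1]".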
8240
8241                                 pkg_cp = xs[0]
8242                                 root_config = self.roots[myroot]
8243                                 system_set = root_config.sets["system"]
8244                                 world_set  = root_config.sets["world"]
8245
8246                                 pkg_system = False
8247                                 pkg_world = False
8248                                 try:
8249                                         pkg_system = system_set.findAtomForPackage(pkg)
8250                                         pkg_world  = world_set.findAtomForPackage(pkg)
8251                                         if not (oneshot or pkg_world) and \
8252                                                 myroot == self.target_root and \
8253                                                 favorites_set.findAtomForPackage(pkg):
8254                                                 # Maybe it will be added to world now.
8255                                                 if create_world_atom(pkg, favorites_set, root_config):
8256                                                         pkg_world = True
8257                                 except portage.exception.InvalidDependString:
8258                                         # This is reported elsewhere if relevant.
8259                                         pass
8260
8261                                 def pkgprint(pkg_str):
8262                                         if pkg_merge:
8263                                                 if pkg_system:
8264                                                         return colorize("PKG_MERGE_SYSTEM", pkg_str)
8265                                                 elif pkg_world:
8266                                                         return colorize("PKG_MERGE_WORLD", pkg_str)
8267                                                 else:
8268                                                         return colorize("PKG_MERGE", pkg_str)
8269                                         elif pkg_status == "uninstall":
8270                                                 return colorize("PKG_UNINSTALL", pkg_str)
8271                                         else:
8272                                                 if pkg_system:
8273                                                         return colorize("PKG_NOMERGE_SYSTEM", pkg_str)
8274                                                 elif pkg_world:
8275                                                         return colorize("PKG_NOMERGE_WORLD", pkg_str)
8276                                                 else:
8277                                                         return colorize("PKG_NOMERGE", pkg_str)
8278
8279                                 try:
8280                                         properties = flatten(use_reduce(paren_reduce(
8281                                                 pkg.metadata["PROPERTIES"]), uselist=pkg.use.enabled))
8282                                 except portage.exception.InvalidDependString, e:
8283                                         if not pkg.installed:
8284                                                 show_invalid_depstring_notice(pkg,
8285                                                         pkg.metadata["PROPERTIES"], str(e))
8286                                                 del e
8287                                                 return 1
8288                                         properties = []
8289                                 interactive = "interactive" in properties
8290                                 if interactive and pkg.operation == "merge":
8291                                         addl = colorize("WARN", "I") + addl[1:]
8292                                         if ordered:
8293                                                 counters.interactive += 1
8294
8295                                 if x[1]!="/":
8296                                         if myoldbest:
8297                                                 myoldbest +=" "
8298                                         if "--columns" in self.myopts:
8299                                                 if "--quiet" in self.myopts:
8300                                                         myprint=addl+" "+indent+pkgprint(pkg_cp)
8301                                                         myprint=myprint+darkblue(" "+xs[1]+xs[2])+" "
8302                                                         myprint=myprint+myoldbest
8303                                                         myprint=myprint+darkgreen("to "+x[1])
8304                                                         verboseadd = None
8305                                                 else:
8306                                                         if not pkg_merge:
8307                                                                 myprint = "[%s] %s%s" % \
8308                                                                         (pkgprint(pkg_status.ljust(13)),
8309                                                                         indent, pkgprint(pkg.cp))
8310                                                         else:
8311                                                                 myprint = "[%s %s] %s%s" % \
8312                                                                         (pkgprint(pkg.type_name), addl,
8313                                                                         indent, pkgprint(pkg.cp))
8314                                                         if (newlp-nc_len(myprint)) > 0:
8315                                                                 myprint=myprint+(" "*(newlp-nc_len(myprint)))
8316                                                         myprint=myprint+"["+darkblue(xs[1]+xs[2])+"] "
8317                                                         if (oldlp-nc_len(myprint)) > 0:
8318                                                                 myprint=myprint+" "*(oldlp-nc_len(myprint))
8319                                                         myprint=myprint+myoldbest
8320                                                         myprint += darkgreen("to " + pkg.root)
8321                                         else:
8322                                                 if not pkg_merge:
8323                                                         myprint = "[%s] " % pkgprint(pkg_status.ljust(13))
8324                                                 else:
8325                                                         myprint = "[%s %s] " % (pkgprint(pkg_type), addl)
8326                                                 myprint += indent + pkgprint(pkg_key) + " " + \
8327                                                         myoldbest + darkgreen("to " + myroot)
8328                                 else:
8329                                         if "--columns" in self.myopts:
8330                                                 if "--quiet" in self.myopts:
8331                                                         myprint=addl+" "+indent+pkgprint(pkg_cp)
8332                                                         myprint=myprint+" "+green(xs[1]+xs[2])+" "
8333                                                         myprint=myprint+myoldbest
8334                                                         verboseadd = None
8335                                                 else:
8336                                                         if not pkg_merge:
8337                                                                 myprint = "[%s] %s%s" % \
8338                                                                         (pkgprint(pkg_status.ljust(13)),
8339                                                                         indent, pkgprint(pkg.cp))
8340                                                         else:
8341                                                                 myprint = "[%s %s] %s%s" % \
8342                                                                         (pkgprint(pkg.type_name), addl,
8343                                                                         indent, pkgprint(pkg.cp))
8344                                                         if (newlp-nc_len(myprint)) > 0:
8345                                                                 myprint=myprint+(" "*(newlp-nc_len(myprint)))
8346                                                         myprint=myprint+green(" ["+xs[1]+xs[2]+"] ")
8347                                                         if (oldlp-nc_len(myprint)) > 0:
8348                                                                 myprint=myprint+(" "*(oldlp-nc_len(myprint)))
8349                                                         myprint += myoldbest
8350                                         else:
8351                                                 if not pkg_merge:
8352                                                         myprint = "[%s] %s%s %s" % \
8353                                                                 (pkgprint(pkg_status.ljust(13)),
8354                                                                 indent, pkgprint(pkg.cpv),
8355                                                                 myoldbest)
8356                                                 else:
8357                                                         myprint = "[%s %s] %s%s %s" % \
8358                                                                 (pkgprint(pkg_type), addl, indent,
8359                                                                 pkgprint(pkg.cpv), myoldbest)
8360
8361                                 if columns and pkg.operation == "uninstall":
8362                                         continue
8363                                 p.append((myprint, verboseadd, repoadd))
8364
8365                                 if "--tree" not in self.myopts and \
8366                                         "--quiet" not in self.myopts and \
8367                                         not self._opts_no_restart.intersection(self.myopts) and \
8368                                         pkg.root == self._running_root.root and \
8369                                         portage.match_from_list(
8370                                         portage.const.PORTAGE_PACKAGE_ATOM, [pkg]) and \
8371                                         not vardb.cpv_exists(pkg.cpv):
8373                                                 if mylist_index < len(mylist) - 1:
8374                                                         p.append(colorize("WARN", "*** Portage will stop merging at this point and reload itself,"))
8375                                                         p.append(colorize("WARN", "    then resume the merge."))
8376
8377                 out = sys.stdout
8378                 show_repos = repoadd_set and repoadd_set != set(["0"])
8379
8380                 for x in p:
8381                         if isinstance(x, basestring):
8382                                 out.write("%s\n" % (x,))
8383                                 continue
8384
8385                         myprint, verboseadd, repoadd = x
8386
8387                         if verboseadd:
8388                                 myprint += " " + verboseadd
8389
8390                         if show_repos and repoadd:
8391                                 myprint += " " + teal("[%s]" % repoadd)
8392
8393                         out.write("%s\n" % (myprint,))
8394
8395                 for x in blockers:
8396                         print x
8397
8398                 if verbosity == 3:
8399                         print
8400                         print counters
8401                         if show_repos:
8402                                 sys.stdout.write(str(repo_display))
8403
8404                 if "--changelog" in self.myopts:
8405                         print
8406                         for revision,text in changelogs:
8407                                 print bold('*'+revision)
8408                                 sys.stdout.write(text)
8409
8410                 sys.stdout.flush()
8411                 return os.EX_OK
8412
8413         def display_problems(self):
8414                 """
8415                 Display problems with the dependency graph such as slot collisions.
8416                 This is called internally by display() to show the problems _after_
8417                 the merge list, where they are most likely to be seen. If display()
8418                 is not going to be called, this method should be called explicitly
8419                 to ensure that the user is notified of problems with the graph.
8420
8421                 All output goes to stderr, except for unsatisfied dependencies which
8422                 go to stdout for parsing by programs such as autounmask.
8423                 """
8424
8425                 # Note that show_masked_packages() sends its output to
8426                 # stdout, and some programs such as autounmask parse the
8427                 # output in cases when emerge bails out. However, when
8428                 # show_masked_packages() is called for installed packages
8429                 # here, the message is a warning that is more appropriate
8430                 # to send to stderr, so temporarily redirect stdout to
8431                 # stderr. TODO: Fix output code so there's a cleaner way
8432                 # to redirect everything to stderr.
8433                 sys.stdout.flush()
8434                 sys.stderr.flush()
8435                 stdout = sys.stdout
8436                 try:
8437                         sys.stdout = sys.stderr
8438                         self._display_problems()
8439                 finally:
8440                         sys.stdout = stdout
8441                         sys.stdout.flush()
8442                         sys.stderr.flush()
8443
8444                 # This goes to stdout for parsing by programs like autounmask.
8445                 for pargs, kwargs in self._unsatisfied_deps_for_display:
8446                         self._show_unsatisfied_dep(*pargs, **kwargs)
8447
8448         def _display_problems(self):
8449                 if self._circular_deps_for_display is not None:
8450                         self._show_circular_deps(
8451                                 self._circular_deps_for_display)
8452
8453                 # The user is only notified of a slot conflict if
8454                 # there are no unresolvable blocker conflicts.
8455                 if self._unsatisfied_blockers_for_display is not None:
8456                         self._show_unsatisfied_blockers(
8457                                 self._unsatisfied_blockers_for_display)
8458                 else:
8459                         self._show_slot_collision_notice()
8460
8461                 # TODO: Add generic support for "set problem" handlers so that
8462                 # the below warnings aren't special cases for world only.
8463
8464                 if self._missing_args:
8465                         world_problems = False
8466                         if "world" in self._sets:
8467                                 # Filter out indirect members of world (from nested sets)
8468                                 # since only direct members of world are desired here.
8469                                 world_set = self.roots[self.target_root].sets["world"]
8470                                 for arg, atom in self._missing_args:
8471                                         if arg.name == "world" and atom in world_set:
8472                                                 world_problems = True
8473                                                 break
8474
8475                         if world_problems:
8476                                 sys.stderr.write("\n!!! Problems have been " + \
8477                                         "detected with your world file\n")
8478                                 sys.stderr.write("!!! Please run " + \
8479                                         green("emaint --check world")+"\n\n")
8480
8481                 if self._missing_args:
8482                         sys.stderr.write("\n" + colorize("BAD", "!!!") + \
8483                                 " Ebuilds for the following packages are either all\n")
8484                         sys.stderr.write(colorize("BAD", "!!!") + \
8485                                 " masked or don't exist:\n")
8486                         sys.stderr.write(" ".join(str(atom) for arg, atom in \
8487                                 self._missing_args) + "\n")
8488
8489                 if self._pprovided_args:
8490                         arg_refs = {}
8491                         for arg, atom in self._pprovided_args:
8492                                 if isinstance(arg, SetArg):
8493                                         parent = arg.name
8494                                         arg_atom = (atom, atom)
8495                                 else:
8496                                         parent = "args"
8497                                         arg_atom = (arg.arg, atom)
8498                                 refs = arg_refs.setdefault(arg_atom, [])
8499                                 if parent not in refs:
8500                                         refs.append(parent)
8501                         msg = []
8502                         msg.append(bad("\nWARNING: "))
8503                         if len(self._pprovided_args) > 1:
8504                                 msg.append("Requested packages will not be " + \
8505                                         "merged because they are listed in\n")
8506                         else:
8507                                 msg.append("A requested package will not be " + \
8508                                         "merged because it is listed in\n")
8509                         msg.append("package.provided:\n\n")
8510                         problems_sets = set()
8511                         for (arg, atom), refs in arg_refs.iteritems():
8512                                 ref_string = ""
8513                                 if refs:
8514                                         problems_sets.update(refs)
8515                                         refs.sort()
8516                                         ref_string = ", ".join(["'%s'" % name for name in refs])
8517                                         ref_string = " pulled in by " + ref_string
8518                                 msg.append("  %s%s\n" % (colorize("INFORM", str(arg)), ref_string))
8519                         msg.append("\n")
8520                         if "world" in problems_sets:
8521                                 msg.append("This problem can be solved in one of the following ways:\n\n")
8522                                 msg.append("  A) Use emaint to clean offending packages from world (if not installed).\n")
8523                                 msg.append("  B) Uninstall offending packages (cleans them from world).\n")
8524                                 msg.append("  C) Remove offending entries from package.provided.\n\n")
8525                                 msg.append("The best course of action depends on the reason that an offending\n")
8526                                 msg.append("package.provided entry exists.\n\n")
8527                         sys.stderr.write("".join(msg))
8528
8529                 masked_packages = []
8530                 for pkg in self._masked_installed:
8531                         root_config = pkg.root_config
8532                         pkgsettings = self.pkgsettings[pkg.root]
8533                         mreasons = get_masking_status(pkg, pkgsettings, root_config)
8534                         masked_packages.append((root_config, pkgsettings,
8535                                 pkg.cpv, pkg.metadata, mreasons))
8536                 if masked_packages:
8537                         sys.stderr.write("\n" + colorize("BAD", "!!!") + \
8538                                 " The following installed packages are masked:\n")
8539                         show_masked_packages(masked_packages)
8540                         show_mask_docs()
8541                         print
8542
8543         def calc_changelog(self,ebuildpath,current,next):
8544                 if ebuildpath is None or not os.path.exists(ebuildpath):
8545                         return []
8546                 current = '-'.join(portage.catpkgsplit(current)[1:])
8547                 if current.endswith('-r0'):
8548                         current = current[:-3]
8549                 next = '-'.join(portage.catpkgsplit(next)[1:])
8550                 if next.endswith('-r0'):
8551                         next = next[:-3]
8552                 changelogpath = os.path.join(os.path.split(ebuildpath)[0],'ChangeLog')
8553                 try:
8554                         changelog = open(changelogpath).read()
8555                 except SystemExit, e:
8556                         raise # Needed else can't exit
8557                 except:
8558                         return []
8559                 divisions = self.find_changelog_tags(changelog)
8560                 #print 'XX from',current,'to',next
8561                 #for div,text in divisions: print 'XX',div
8562                 # skip entries for all revisions above the one we are about to emerge
8563                 for i in range(len(divisions)):
8564                         if divisions[i][0]==next:
8565                                 divisions = divisions[i:]
8566                                 break
8567                 # find out how many entries we are going to display
8568                 for i in range(len(divisions)):
8569                         if divisions[i][0]==current:
8570                                 divisions = divisions[:i]
8571                                 break
8572                 else:
8573                         # couldn't find the current revision in the list; display nothing
8574                         return []
8575                 return divisions
8576
8577         def find_changelog_tags(self,changelog):
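                     # Expected ChangeLog entry headers look like "*foo-1.2.3 (15 Aug 2008)"
                     # (version and date are illustrative); each header starts a new
                     # division, and trailing ".ebuild" or "-r0" suffixes are stripped
                     # from the release token below.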
8578                 divs = []
8579                 release = None
8580                 while 1:
8581                         match = re.search(r'^\*\ ?([-a-zA-Z0-9_.+]*)(?:\ .*)?\n',changelog,re.M)
8582                         if match is None:
8583                                 if release is not None:
8584                                         divs.append((release,changelog))
8585                                 return divs
8586                         if release is not None:
8587                                 divs.append((release,changelog[:match.start()]))
8588                         changelog = changelog[match.end():]
8589                         release = match.group(1)
8590                         if release.endswith('.ebuild'):
8591                                 release = release[:-7]
8592                         if release.endswith('-r0'):
8593                                 release = release[:-3]
8594
8595         def saveNomergeFavorites(self):
8596                 """Find atoms in favorites that are not in the mergelist and add them
8597                 to the world file if necessary."""
8598                 for x in ("--buildpkgonly", "--fetchonly", "--fetch-all-uri",
8599                         "--oneshot", "--onlydeps", "--pretend"):
8600                         if x in self.myopts:
8601                                 return
8602                 root_config = self.roots[self.target_root]
8603                 world_set = root_config.sets["world"]
8604
8605                 world_locked = False
8606                 if hasattr(world_set, "lock"):
8607                         world_set.lock()
8608                         world_locked = True
8609
8610                 if hasattr(world_set, "load"):
8611                         world_set.load() # maybe it's changed on disk
8612
8613                 args_set = self._sets["args"]
8614                 portdb = self.trees[self.target_root]["porttree"].dbapi
8615                 added_favorites = set()
8616                 for x in self._set_nodes:
8617                         pkg_type, root, pkg_key, pkg_status = x
8618                         if pkg_status != "nomerge":
8619                                 continue
8620
8621                         try:
8622                                 myfavkey = create_world_atom(x, args_set, root_config)
8623                                 if myfavkey:
8624                                         if myfavkey in added_favorites:
8625                                                 continue
8626                                         added_favorites.add(myfavkey)
8627                         except portage.exception.InvalidDependString, e:
8628                                 writemsg("\n\n!!! '%s' has invalid PROVIDE: %s\n" % \
8629                                         (pkg_key, str(e)), noiselevel=-1)
8630                                 writemsg("!!! see '%s'\n\n" % os.path.join(
8631                                         root, portage.VDB_PATH, pkg_key, "PROVIDE"), noiselevel=-1)
8632                                 del e
8633                 all_added = []
8634                 for k in self._sets:
8635                         if k in ("args", "world") or not root_config.sets[k].world_candidate:
8636                                 continue
8637                         s = SETPREFIX + k
8638                         if s in world_set:
8639                                 continue
8640                         all_added.append(SETPREFIX + k)
8641                 all_added.extend(added_favorites)
8642                 all_added.sort()
8643                 for a in all_added:
8644                         print ">>> Recording %s in \"world\" favorites file..." % \
8645                                 colorize("INFORM", str(a))
8646                 if all_added:
8647                         world_set.update(all_added)
8648
8649                 if world_locked:
8650                         world_set.unlock()
8651
8652         def loadResumeCommand(self, resume_data, skip_masked=False):
8653                 """
8654                 Add a resume command to the graph and validate it in the process.  This
8655                 will raise a PackageNotFound exception if a package is not available.
8656                 """
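                     # A sketch of the resume_data shape this method expects, inferred
                     # from the parsing below (the values shown are illustrative):
                     #
                     #   resume_data = {
                     #       "mergelist": [["ebuild", "/", "app-misc/foo-1.0", "merge"]],
                     #       "favorites": ["app-misc/foo"],
                     #   }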
8657
8658                 if not isinstance(resume_data, dict):
8659                         return False
8660
8661                 mergelist = resume_data.get("mergelist")
8662                 if not isinstance(mergelist, list):
8663                         mergelist = []
8664
8665                 fakedb = self.mydbapi
8666                 trees = self.trees
8667                 serialized_tasks = []
8668                 masked_tasks = []
8669                 for x in mergelist:
8670                         if not (isinstance(x, list) and len(x) == 4):
8671                                 continue
8672                         pkg_type, myroot, pkg_key, action = x
8673                         if pkg_type not in self.pkg_tree_map:
8674                                 continue
8675                         if action != "merge":
8676                                 continue
8677                         tree_type = self.pkg_tree_map[pkg_type]
8678                         mydb = trees[myroot][tree_type].dbapi
8679                         db_keys = list(self._trees_orig[myroot][
8680                                 tree_type].dbapi._aux_cache_keys)
8681                         try:
8682                                 metadata = izip(db_keys, mydb.aux_get(pkg_key, db_keys))
8683                         except KeyError:
8684                                 # It does not exist or it is corrupt.
8685                                 if action == "uninstall":
8686                                         continue
8687                                 raise portage.exception.PackageNotFound(pkg_key)
8688                         installed = action == "uninstall"
8689                         built = pkg_type != "ebuild"
8690                         root_config = self.roots[myroot]
8691                         pkg = Package(built=built, cpv=pkg_key,
8692                                 installed=installed, metadata=metadata,
8693                                 operation=action, root_config=root_config,
8694                                 type_name=pkg_type)
8695                         if pkg_type == "ebuild":
8696                                 pkgsettings = self.pkgsettings[myroot]
8697                                 pkgsettings.setcpv(pkg)
8698                                 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
8699                         self._pkg_cache[pkg] = pkg
8700
8701                         root_config = self.roots[pkg.root]
8702                         if "merge" == pkg.operation and \
8703                                 not visible(root_config.settings, pkg):
8704                                 if skip_masked:
8705                                         masked_tasks.append(Dependency(root=pkg.root, parent=pkg))
8706                                 else:
8707                                         self._unsatisfied_deps_for_display.append(
8708                                                 ((pkg.root, "="+pkg.cpv), {"myparent":None}))
8709
8710                         fakedb[myroot].cpv_inject(pkg)
8711                         serialized_tasks.append(pkg)
8712                         self.spinner.update()
8713
8714                 if self._unsatisfied_deps_for_display:
8715                         return False
8716
8717                 if not serialized_tasks or "--nodeps" in self.myopts:
8718                         self._serialized_tasks_cache = serialized_tasks
8719                         self._scheduler_graph = self.digraph
8720                 else:
8721                         self._select_package = self._select_pkg_from_graph
8722                         self.myparams.add("selective")
8723                         # Always traverse deep dependencies in order to account for
8724                         # potentially unsatisfied dependencies of installed packages.
8725                         # This is necessary for correct --keep-going or --resume operation
8726                         # in case a package from a group of circularly dependent packages
8727                         # fails. In this case, a package which has recently been installed
8728                         # may have an unsatisfied circular dependency (pulled in by
8729                         # PDEPEND, for example). So, even though a package is already
8730                         # installed, it may not have all of its dependencies satisfied, so
8731                         # it may not be usable. If such a package is in the subgraph of
8732                         # deep dependencies of a scheduled build, that build needs to
8733                         # be cancelled. In order for this type of situation to be
8734                         # recognized, deep traversal of dependencies is required.
8735                         self.myparams.add("deep")
8736
8737                         favorites = resume_data.get("favorites")
8738                         args_set = self._sets["args"]
8739                         if isinstance(favorites, list):
8740                                 args = self._load_favorites(favorites)
8741                         else:
8742                                 args = []
8743
8744                         for task in serialized_tasks:
8745                                 if isinstance(task, Package) and \
8746                                         task.operation == "merge":
8747                                         if not self._add_pkg(task, None):
8748                                                 return False
8749
8750                         # Packages for argument atoms need to be explicitly
8751                         # added via _add_pkg() so that they are included in the
8752                         # digraph (needed at least for --tree display).
8753                         for arg in args:
8754                                 for atom in arg.set:
8755                                         pkg, existing_node = self._select_package(
8756                                                 arg.root_config.root, atom)
8757                                         if existing_node is None and \
8758                                                 pkg is not None:
8759                                                 if not self._add_pkg(pkg, Dependency(atom=atom,
8760                                                         root=pkg.root, parent=arg)):
8761                                                         return False
8762
8763                         # Allow unsatisfied deps here to avoid showing a masking
8764                         # message for an unsatisfied dep that isn't necessarily
8765                         # masked.
8766                         if not self._create_graph(allow_unsatisfied=True):
8767                                 return False
8768
8769                         unsatisfied_deps = []
8770                         for dep in self._unsatisfied_deps:
8771                                 if not isinstance(dep.parent, Package):
8772                                         continue
8773                                 if dep.parent.operation == "merge":
8774                                         unsatisfied_deps.append(dep)
8775                                         continue
8776
8777                                 # For unsatisfied deps of installed packages, only account for
8778                                 # them if they are in the subgraph of dependencies of a package
8779                                 # which is scheduled to be installed.
8780                                 unsatisfied_install = False
8781                                 traversed = set()
8782                                 dep_stack = self.digraph.parent_nodes(dep.parent)
8783                                 while dep_stack:
8784                                         node = dep_stack.pop()
8785                                         if not isinstance(node, Package):
8786                                                 continue
8787                                         if node.operation == "merge":
8788                                                 unsatisfied_install = True
8789                                                 break
8790                                         if node in traversed:
8791                                                 continue
8792                                         traversed.add(node)
8793                                         dep_stack.extend(self.digraph.parent_nodes(node))
8794
8795                                 if unsatisfied_install:
8796                                         unsatisfied_deps.append(dep)
8797
8798                         if masked_tasks or unsatisfied_deps:
8799                                 # This probably means that a required package
8800                                 # was dropped via --skipfirst. It makes the
8801                                 # resume list invalid, so convert it to a
8802                                 # UnsatisfiedResumeDep exception.
8803                                 raise self.UnsatisfiedResumeDep(self,
8804                                         masked_tasks + unsatisfied_deps)
8805                         self._serialized_tasks_cache = None
8806                         try:
8807                                 self.altlist()
8808                         except self._unknown_internal_error:
8809                                 return False
8810
8811                 return True
8812
8813         def _load_favorites(self, favorites):
8814                 """
8815                 Use a list of favorites to resume state from a
8816                 previous select_files() call. This creates similar
8817                 DependencyArg instances to those that would have
8818                 been created by the original select_files() call.
8819                 This allows Package instances to be matched with
8820                 DependencyArg instances during graph creation.
8821                 """
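                     # A sketch of typical favorites entries handled below (the set name
                     # and atom are illustrative; SETPREFIX is the set prefix, "@" in
                     # portage's set syntax):
                     #
                     #   ["world", "@kde", "app-editors/vim"]
                     #
                     # "world" is normalized to a SETPREFIX name, "@kde" is expanded into
                     # an InternalPackageSet if such a set is configured, and the plain
                     # atom becomes an AtomArg.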
8822                 root_config = self.roots[self.target_root]
8823                 getSetAtoms = root_config.setconfig.getSetAtoms
8824                 sets = root_config.sets
8825                 args = []
8826                 for x in favorites:
8827                         if not isinstance(x, basestring):
8828                                 continue
8829                         if x in ("system", "world"):
8830                                 x = SETPREFIX + x
8831                         if x.startswith(SETPREFIX):
8832                                 s = x[len(SETPREFIX):]
8833                                 if s not in sets:
8834                                         continue
8835                                 if s in self._sets:
8836                                         continue
8837                                 # Recursively expand sets so that containment tests in
8838                                 # self._get_parent_sets() properly match atoms in nested
8839                                 # sets (like if world contains system).
8840                                 expanded_set = InternalPackageSet(
8841                                         initial_atoms=getSetAtoms(s))
8842                                 self._sets[s] = expanded_set
8843                                 args.append(SetArg(arg=x, set=expanded_set,
8844                                         root_config=root_config))
8845                         else:
8846                                 if not portage.isvalidatom(x):
8847                                         continue
8848                                 args.append(AtomArg(arg=x, atom=x,
8849                                         root_config=root_config))
8850
8851                 self._set_args(args)
8852                 return args
8853
8854         class UnsatisfiedResumeDep(portage.exception.PortageException):
8855                 """
8856                 A dependency of a resume list is not installed. This
8857                 can occur when a required package is dropped from the
8858                 merge list via --skipfirst.
8859                 """
8860                 def __init__(self, depgraph, value):
8861                         portage.exception.PortageException.__init__(self, value)
8862                         self.depgraph = depgraph
8863
8864         class _internal_exception(portage.exception.PortageException):
8865                 def __init__(self, value=""):
8866                         portage.exception.PortageException.__init__(self, value)
8867
8868         class _unknown_internal_error(_internal_exception):
8869                 """
8870                 Used by the depgraph internally to terminate graph creation.
8871                 The specific reason for the failure should have been
8872                 dumped to stderr; unfortunately, the exact reason may
8873                 not be known.
8874                 """
8875
8876         class _serialize_tasks_retry(_internal_exception):
8877                 """
8878                 This is raised by the _serialize_tasks() method when it needs to
8879                 be called again for some reason. The only case that it's currently
8880                 used for is when neglected dependencies need to be added to the
8881                 graph in order to avoid making a potentially unsafe decision.
8882                 """
8883
8884         class _dep_check_composite_db(portage.dbapi):
8885                 """
8886                 A dbapi-like interface that is optimized for use in dep_check() calls.
8887                 This is built on top of the existing depgraph package selection logic.
8888                 Some packages that have been added to the graph may be masked from this
8889                 view in order to influence the atom preference selection that occurs
8890                 via dep_check().
8891                 """
8892                 def __init__(self, depgraph, root):
8893                         portage.dbapi.__init__(self)
8894                         self._depgraph = depgraph
8895                         self._root = root
8896                         self._match_cache = {}
8897                         self._cpv_pkg_map = {}
8898
8899                 def _clear_cache(self):
8900                         self._match_cache.clear()
8901                         self._cpv_pkg_map.clear()
8902
8903                 def match(self, atom):
8904                         ret = self._match_cache.get(atom)
8905                         if ret is not None:
8906                                 return ret[:]
8907                         orig_atom = atom
8908                         if "/" not in atom:
8909                                 atom = self._dep_expand(atom)
8910                         pkg, existing = self._depgraph._select_package(self._root, atom)
8911                         if not pkg:
8912                                 ret = []
8913                         else:
8914                                 # Return the highest available from select_package() as well as
8915                                 # any matching slots in the graph db.
8916                                 slots = set()
8917                                 slots.add(pkg.metadata["SLOT"])
8918                                 atom_cp = portage.dep_getkey(atom)
8919                                 if pkg.cp.startswith("virtual/"):
8920                                         # For new-style virtual lookahead that occurs inside
8921                                         # dep_check(), examine all slots. This is needed
8922                                         # so that newer slots will not unnecessarily be pulled in
8923                                         # when a satisfying lower slot is already installed. For
8924                                         # example, if virtual/jdk-1.4 is satisfied via kaffe then
8925                                         # there's no need to pull in a newer slot to satisfy a
8926                                         # virtual/jdk dependency.
8927                                         for db, pkg_type, built, installed, db_keys in \
8928                                                 self._depgraph._filtered_trees[self._root]["dbs"]:
8929                                                 for cpv in db.match(atom):
8930                                                         if portage.cpv_getkey(cpv) != pkg.cp:
8931                                                                 continue
8932                                                         slots.add(db.aux_get(cpv, ["SLOT"])[0])
8933                                 ret = []
8934                                 if self._visible(pkg):
8935                                         self._cpv_pkg_map[pkg.cpv] = pkg
8936                                         ret.append(pkg.cpv)
8937                                 slots.remove(pkg.metadata["SLOT"])
8938                                 while slots:
8939                                         slot_atom = "%s:%s" % (atom_cp, slots.pop())
8940                                         pkg, existing = self._depgraph._select_package(
8941                                                 self._root, slot_atom)
8942                                         if not pkg:
8943                                                 continue
8944                                         if not self._visible(pkg):
8945                                                 continue
8946                                         self._cpv_pkg_map[pkg.cpv] = pkg
8947                                         ret.append(pkg.cpv)
8948                                 if ret:
8949                                         self._cpv_sort_ascending(ret)
8950                         self._match_cache[orig_atom] = ret
8951                         return ret[:]
8952
8953                 def _visible(self, pkg):
8954                         if pkg.installed and "selective" not in self._depgraph.myparams:
8955                                 try:
8956                                         arg = self._depgraph._iter_atoms_for_pkg(pkg).next()
8957                                 except (StopIteration, portage.exception.InvalidDependString):
8958                                         arg = None
8959                                 if arg:
8960                                         return False
8961                         if pkg.installed:
8962                                 try:
8963                                         if not visible(
8964                                                 self._depgraph.pkgsettings[pkg.root], pkg):
8965                                                 return False
8966                                 except portage.exception.InvalidDependString:
8967                                         pass
8968                         in_graph = self._depgraph._slot_pkg_map[
8969                                 self._root].get(pkg.slot_atom)
8970                         if in_graph is None:
8971                                 # Mask choices for packages which are not the highest visible
8972                                 # version within their slot (since they usually trigger slot
8973                                 # conflicts).
8974                                 highest_visible, in_graph = self._depgraph._select_package(
8975                                         self._root, pkg.slot_atom)
8976                                 if pkg != highest_visible:
8977                                         return False
8978                         elif in_graph != pkg:
8979                                 # Mask choices for packages that would trigger a slot
8980                                 # conflict with a previously selected package.
8981                                 return False
8982                         return True
8983
8984                 def _dep_expand(self, atom):
8985                         """
8986                         This is only needed for old installed packages that may
8987                         contain atoms that are not fully qualified with a specific
8988                         category. Emulate the cpv_expand() function that's used by
8989                         dbapi.match() in cases like this. If there are multiple
8990                         matches, it's often due to a new-style virtual that has
8991                         been added, so try to filter those out to avoid raising
8992                         a ValueError.
8993                         """
8994                         root_config = self._depgraph.roots[self._root]
8995                         orig_atom = atom
8996                         expanded_atoms = self._depgraph._dep_expand(root_config, atom)
8997                         if len(expanded_atoms) > 1:
8998                                 non_virtual_atoms = []
8999                                 for x in expanded_atoms:
9000                                         if not portage.dep_getkey(x).startswith("virtual/"):
9001                                                 non_virtual_atoms.append(x)
9002                                 if len(non_virtual_atoms) == 1:
9003                                         expanded_atoms = non_virtual_atoms
9004                         if len(expanded_atoms) > 1:
9005                                 # compatible with portage.cpv_expand()
9006                                 raise portage.exception.AmbiguousPackageName(
9007                                         [portage.dep_getkey(x) for x in expanded_atoms])
9008                         if expanded_atoms:
9009                                 atom = expanded_atoms[0]
9010                         else:
9011                                 null_atom = insert_category_into_atom(atom, "null")
9012                                 null_cp = portage.dep_getkey(null_atom)
9013                                 cat, atom_pn = portage.catsplit(null_cp)
9014                                 virts_p = root_config.settings.get_virts_p().get(atom_pn)
9015                                 if virts_p:
9016                                         # Allow the resolver to choose which virtual.
9017                                         atom = insert_category_into_atom(atom, "virtual")
9018                                 else:
9019                                         atom = insert_category_into_atom(atom, "null")
9020                         return atom
9021
9022                 def aux_get(self, cpv, wants):
9023                         metadata = self._cpv_pkg_map[cpv].metadata
9024                         return [metadata.get(x, "") for x in wants]
9025
9026 class RepoDisplay(object):
9027         def __init__(self, roots):
9028                 self._shown_repos = {}
9029                 self._unknown_repo = False
9030                 repo_paths = set()
9031                 for root_config in roots.itervalues():
9032                         portdir = root_config.settings.get("PORTDIR")
9033                         if portdir:
9034                                 repo_paths.add(portdir)
9035                         overlays = root_config.settings.get("PORTDIR_OVERLAY")
9036                         if overlays:
9037                                 repo_paths.update(overlays.split())
9038                 repo_paths = list(repo_paths)
9039                 self._repo_paths = repo_paths
9040                 self._repo_paths_real = [ os.path.realpath(repo_path) \
9041                         for repo_path in repo_paths ]
9042
9043                 # pre-allocate index for PORTDIR so that it always has index 0.
9044                 for root_config in roots.itervalues():
9045                         portdb = root_config.trees["porttree"].dbapi
9046                         portdir = portdb.porttree_root
9047                         if portdir:
9048                                 self.repoStr(portdir)
9049
9050         def repoStr(self, repo_path_real):
9051                 real_index = -1
9052                 if repo_path_real:
9053                         real_index = self._repo_paths_real.index(repo_path_real)
9054                 if real_index == -1:
9055                         s = "?"
9056                         self._unknown_repo = True
9057                 else:
9058                         shown_repos = self._shown_repos
9059                         repo_paths = self._repo_paths
9060                         repo_path = repo_paths[real_index]
9061                         index = shown_repos.get(repo_path)
9062                         if index is None:
9063                                 index = len(shown_repos)
9064                                 shown_repos[repo_path] = index
9065                         s = str(index)
9066                 return s
9067
9068         def __str__(self):
9069                 output = []
9070                 shown_repos = self._shown_repos
9071                 unknown_repo = self._unknown_repo
9072                 if shown_repos or self._unknown_repo:
9073                         output.append("Portage tree and overlays:\n")
9074                 show_repo_paths = list(shown_repos)
9075                 for repo_path, repo_index in shown_repos.iteritems():
9076                         show_repo_paths[repo_index] = repo_path
9077                 if show_repo_paths:
9078                         for index, repo_path in enumerate(show_repo_paths):
9079                                 output.append(" "+teal("["+str(index)+"]")+" %s\n" % repo_path)
9080                 if unknown_repo:
9081                         output.append(" "+teal("[?]") + \
9082                                 " indicates that the source repository could not be determined\n")
9083                 return "".join(output)
9084
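# Illustrative sketch (not part of upstream portage): RepoDisplay assigns a
# small index to each repository the first time repoStr() sees it, and
# __str__() renders the matching legend. The "roots" argument stands in for
# the root_config mapping that emerge builds elsewhere, and the path should
# be the realpath of a repository already listed in PORTDIR/PORTDIR_OVERLAY.
# This helper is never called from module code.
def _example_repo_display(roots, repo_path_real):
        repo_display = RepoDisplay(roots)
        # Returns a small index such as "0" for PORTDIR or "1" for the first
        # overlay; an empty path yields "?" (unknown repository).
        index_str = repo_display.repoStr(repo_path_real)
        legend = str(repo_display)  # "Portage tree and overlays:" listing
        return index_str, legend
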
9085 class PackageCounters(object):
9086
9087         def __init__(self):
9088                 self.upgrades   = 0
9089                 self.downgrades = 0
9090                 self.new        = 0
9091                 self.newslot    = 0
9092                 self.reinst     = 0
9093                 self.uninst     = 0
9094                 self.blocks     = 0
9095                 self.blocks_satisfied         = 0
9096                 self.totalsize  = 0
9097                 self.restrict_fetch           = 0
9098                 self.restrict_fetch_satisfied = 0
9099                 self.interactive              = 0
9100
9101         def __str__(self):
9102                 total_installs = self.upgrades + self.downgrades + self.newslot + self.new + self.reinst
9103                 myoutput = []
9104                 details = []
9105                 myoutput.append("Total: %s package" % total_installs)
9106                 if total_installs != 1:
9107                         myoutput.append("s")
9108                 if total_installs != 0:
9109                         myoutput.append(" (")
9110                 if self.upgrades > 0:
9111                         details.append("%s upgrade" % self.upgrades)
9112                         if self.upgrades > 1:
9113                                 details[-1] += "s"
9114                 if self.downgrades > 0:
9115                         details.append("%s downgrade" % self.downgrades)
9116                         if self.downgrades > 1:
9117                                 details[-1] += "s"
9118                 if self.new > 0:
9119                         details.append("%s new" % self.new)
9120                 if self.newslot > 0:
9121                         details.append("%s in new slot" % self.newslot)
9122                         if self.newslot > 1:
9123                                 details[-1] += "s"
9124                 if self.reinst > 0:
9125                         details.append("%s reinstall" % self.reinst)
9126                         if self.reinst > 1:
9127                                 details[-1] += "s"
9128                 if self.uninst > 0:
9129                         details.append("%s uninstall" % self.uninst)
9130                         if self.uninst > 1:
9131                                 details[-1] += "s"
9132                 if self.interactive > 0:
9133                         details.append("%s %s" % (self.interactive,
9134                                 colorize("WARN", "interactive")))
9135                 myoutput.append(", ".join(details))
9136                 if total_installs != 0:
9137                         myoutput.append(")")
9138                 myoutput.append(", Size of downloads: %s" % format_size(self.totalsize))
9139                 if self.restrict_fetch:
9140                         myoutput.append("\nFetch Restriction: %s package" % \
9141                                 self.restrict_fetch)
9142                         if self.restrict_fetch > 1:
9143                                 myoutput.append("s")
9144                 if self.restrict_fetch_satisfied < self.restrict_fetch:
9145                         myoutput.append(bad(" (%s unsatisfied)") % \
9146                                 (self.restrict_fetch - self.restrict_fetch_satisfied))
9147                 if self.blocks > 0:
9148                         myoutput.append("\nConflict: %s block" % \
9149                                 self.blocks)
9150                         if self.blocks > 1:
9151                                 myoutput.append("s")
9152                         if self.blocks_satisfied < self.blocks:
9153                                 myoutput.append(bad(" (%s unsatisfied)") % \
9154                                         (self.blocks - self.blocks_satisfied))
9155                 return "".join(myoutput)
9156
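# Illustrative sketch (not part of upstream portage): PackageCounters is a
# plain accumulator. The resolver bumps whichever counters apply while it
# walks the merge list and then prints str(counters) as the summary line,
# e.g. "Total: 3 packages (2 upgrades, 1 new), Size of downloads: ...".
# This helper is never called from module code.
def _example_package_counters():
        counters = PackageCounters()
        counters.upgrades += 2
        counters.new += 1
        counters.totalsize += 10485760  # bytes queued for download
        return str(counters)
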
9157 class PollSelectAdapter(PollConstants):
9158
9159         """
9160         Use select to emulate a poll object, for
9161         systems that don't support poll().
9162         """
9163
9164         def __init__(self):
9165                 self._registered = {}
9166                 self._select_args = [[], [], []]
9167
9168         def register(self, fd, *args):
9169                 """
9170                 Only POLLIN is currently supported!
9171                 """
9172                 if len(args) > 1:
9173                         raise TypeError(
9174                                 "register expected at most 2 arguments, got " + \
9175                                 repr(1 + len(args)))
9176
9177                 eventmask = PollConstants.POLLIN | \
9178                         PollConstants.POLLPRI | PollConstants.POLLOUT
9179                 if args:
9180                         eventmask = args[0]
9181
9182                 self._registered[fd] = eventmask
9183                 self._select_args = None
9184
9185         def unregister(self, fd):
9186                 self._select_args = None
9187                 del self._registered[fd]
9188
9189         def poll(self, *args):
9190                 if len(args) > 1:
9191                         raise TypeError(
9192                                 "poll expected at most 2 arguments, got " + \
9193                                 repr(1 + len(args)))
9194
9195                 timeout = None
9196                 if args:
9197                         timeout = args[0]
9198
9199                 select_args = self._select_args
9200                 if select_args is None:
9201                         select_args = [self._registered.keys(), [], []]
9202
9203                 if timeout is not None:
9204                         select_args = select_args[:]
9205                         # Translate poll() timeout args to select() timeout args:
9206                         #
9207                         #          | units        | value(s) for indefinite block
9208                         # ---------|--------------|------------------------------
9209                         #   poll   | milliseconds | omitted, negative, or None
9210                         # ---------|--------------|------------------------------
9211                         #   select | seconds      | omitted
9212                         # ---------|--------------|------------------------------
9213
9214                         if timeout < 0:
9215                                 timeout = None
9216                         if timeout is not None:
9217                                 select_args.append(float(timeout) / 1000)
9218
9219                 select_events = select.select(*select_args)
9220                 poll_events = []
9221                 for fd in select_events[0]:
9222                         poll_events.append((fd, PollConstants.POLLIN))
9223                 return poll_events
9224
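# Illustrative sketch (not part of upstream portage): PollSelectAdapter is a
# drop-in replacement for select.poll() on systems where poll() is missing
# or broken (see create_poll_instance() below); only POLLIN events are
# reported. The fd argument is a placeholder and this helper is never called
# from module code.
def _example_poll_select_adapter(fd):
        adapter = PollSelectAdapter()
        adapter.register(fd, PollConstants.POLLIN)
        # Like select.poll(), the timeout is given in milliseconds.
        events = adapter.poll(1000)
        adapter.unregister(fd)
        return events
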
9225 class SequentialTaskQueue(SlotObject):
9226
9227         __slots__ = ("max_jobs", "running_tasks") + \
9228                 ("_dirty", "_scheduling", "_task_queue")
9229
9230         def __init__(self, **kwargs):
9231                 SlotObject.__init__(self, **kwargs)
9232                 self._task_queue = deque()
9233                 self.running_tasks = set()
9234                 if self.max_jobs is None:
9235                         self.max_jobs = 1
9236                 self._dirty = True
9237
9238         def add(self, task):
9239                 self._task_queue.append(task)
9240                 self._dirty = True
9241
9242         def addFront(self, task):
9243                 self._task_queue.appendleft(task)
9244                 self._dirty = True
9245
9246         def schedule(self):
9247
9248                 if not self._dirty:
9249                         return False
9250
9251                 if not self:
9252                         return False
9253
9254                 if self._scheduling:
9255                         # Ignore any recursive schedule() calls triggered via
9256                         # self._task_exit().
9257                         return False
9258
9259                 self._scheduling = True
9260
9261                 task_queue = self._task_queue
9262                 running_tasks = self.running_tasks
9263                 max_jobs = self.max_jobs
9264                 state_changed = False
9265
9266                 while task_queue and \
9267                         (max_jobs is True or len(running_tasks) < max_jobs):
9268                         task = task_queue.popleft()
9269                         cancelled = getattr(task, "cancelled", None)
9270                         if not cancelled:
9271                                 running_tasks.add(task)
9272                                 task.addExitListener(self._task_exit)
9273                                 task.start()
9274                         state_changed = True
9275
9276                 self._dirty = False
9277                 self._scheduling = False
9278
9279                 return state_changed
9280
9281         def _task_exit(self, task):
9282                 """
9283                 Since we can always rely on exit listeners being called, the set of
9284                 running tasks is always pruned automatically and there is never any need
9285                 to actively prune it.
9286                 """
9287                 self.running_tasks.remove(task)
9288                 if self._task_queue:
9289                         self._dirty = True
9290
9291         def clear(self):
9292                 self._task_queue.clear()
9293                 running_tasks = self.running_tasks
9294                 while running_tasks:
9295                         task = running_tasks.pop()
9296                         task.removeExitListener(self._task_exit)
9297                         task.cancel()
9298                 self._dirty = False
9299
9300         def __nonzero__(self):
9301                 return bool(self._task_queue or self.running_tasks)
9302
9303         def __len__(self):
9304                 return len(self._task_queue) + len(self.running_tasks)
9305
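# Illustrative sketch (not part of upstream portage): SequentialTaskQueue
# starts queued tasks whenever schedule() is called and there is room under
# max_jobs. The task objects are assumed to provide start(), cancel() and
# addExitListener(), like the asynchronous task classes used elsewhere in
# this file; this helper is never called from module code.
def _example_sequential_task_queue(tasks):
        queue = SequentialTaskQueue(max_jobs=2)
        for task in tasks:
                queue.add(task)
        # Start as many tasks as max_jobs allows; finished tasks are pruned
        # automatically by the exit listener installed in schedule().
        queue.schedule()
        return len(queue)  # queued plus running tasks
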
9306 _can_poll_device = None
9307
9308 def can_poll_device():
9309         """
9310         Test if it's possible to use poll() on a device such as a pty. This
9311         is known to fail on Darwin.
9312         @rtype: bool
9313         @returns: True if poll() on a device succeeds, False otherwise.
9314         """
9315
9316         global _can_poll_device
9317         if _can_poll_device is not None:
9318                 return _can_poll_device
9319
9320         if not hasattr(select, "poll"):
9321                 _can_poll_device = False
9322                 return _can_poll_device
9323
9324         try:
9325                 dev_null = open('/dev/null', 'rb')
9326         except IOError:
9327                 _can_poll_device = False
9328                 return _can_poll_device
9329
9330         p = select.poll()
9331         p.register(dev_null.fileno(), PollConstants.POLLIN)
9332
9333         invalid_request = False
9334         for f, event in p.poll():
9335                 if event & PollConstants.POLLNVAL:
9336                         invalid_request = True
9337                         break
9338         dev_null.close()
9339
9340         _can_poll_device = not invalid_request
9341         return _can_poll_device
9342
9343 def create_poll_instance():
9344         """
9345         Create an instance of select.poll, or an instance of
9346         PollSelectAdapter if there is no poll() implementation or
9347         it is broken somehow.
9348         """
9349         if can_poll_device():
9350                 return select.poll()
9351         return PollSelectAdapter()
9352
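# Illustrative sketch (not part of upstream portage): callers do not choose
# between select.poll() and PollSelectAdapter themselves; they ask
# create_poll_instance() for whichever implementation works on this system.
# The fd argument is a placeholder and this helper is never called.
def _example_create_poll_instance(fd):
        poll_obj = create_poll_instance()
        poll_obj.register(fd, PollConstants.POLLIN)
        # A zero timeout makes this a non-blocking readiness check.
        return poll_obj.poll(0)
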
9353 getloadavg = getattr(os, "getloadavg", None)
9354 if getloadavg is None:
9355         def getloadavg():
9356                 """
9357                 Uses /proc/loadavg to emulate os.getloadavg().
9358                 Raises OSError if the load average was unobtainable.
9359                 """
9360                 try:
9361                         loadavg_str = open('/proc/loadavg').readline()
9362                 except IOError:
9363                         # getloadavg() is only supposed to raise OSError, so convert
9364                         raise OSError('unknown')
9365                 loadavg_split = loadavg_str.split()
9366                 if len(loadavg_split) < 3:
9367                         raise OSError('unknown')
9368                 loadavg_floats = []
9369                 for i in xrange(3):
9370                         try:
9371                                 loadavg_floats.append(float(loadavg_split[i]))
9372                         except ValueError:
9373                                 raise OSError('unknown')
9374                 return tuple(loadavg_floats)
9375
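# Illustrative sketch (not part of upstream portage): whichever getloadavg
# implementation ends up bound above, it mirrors os.getloadavg() by
# returning the 1, 5 and 15 minute averages as floats and raising OSError
# when they are unavailable. This is essentially the gate that
# _can_add_job() below applies. This helper is never called.
def _example_load_average_gate(max_load):
        try:
                avg1, avg5, avg15 = getloadavg()
        except OSError:
                return False
        return avg1 < max_load
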
9376 class PollScheduler(object):
9377
9378         class _sched_iface_class(SlotObject):
9379                 __slots__ = ("register", "schedule", "unregister")
9380
9381         def __init__(self):
9382                 self._max_jobs = 1
9383                 self._max_load = None
9384                 self._jobs = 0
9385                 self._poll_event_queue = []
9386                 self._poll_event_handlers = {}
9387                 self._poll_event_handler_ids = {}
9388                 # Increment id for each new handler.
9389                 self._event_handler_id = 0
9390                 self._poll_obj = create_poll_instance()
9391                 self._scheduling = False
9392
9393         def _schedule(self):
9394                 """
9395                 Calls _schedule_tasks() and automatically returns early from
9396                 any recursive calls to this method that the _schedule_tasks()
9397                 call might trigger. This makes _schedule() safe to call from
9398                 inside exit listeners.
9399                 """
9400                 if self._scheduling:
9401                         return False
9402                 self._scheduling = True
9403                 try:
9404                         return self._schedule_tasks()
9405                 finally:
9406                         self._scheduling = False
9407
9408         def _running_job_count(self):
9409                 return self._jobs
9410
9411         def _can_add_job(self):
9412                 max_jobs = self._max_jobs
9413                 max_load = self._max_load
9414
9415                 if self._max_jobs is not True and \
9416                         self._running_job_count() >= self._max_jobs:
9417                         return False
9418
9419                 if max_load is not None and \
9420                         (max_jobs is True or max_jobs > 1) and \
9421                         self._running_job_count() >= 1:
9422                         try:
9423                                 avg1, avg5, avg15 = getloadavg()
9424                         except OSError:
9425                                 return False
9426
9427                         if avg1 >= max_load:
9428                                 return False
9429
9430                 return True
9431
9432         def _poll(self, timeout=None):
9433                 """
9434                 All poll() calls pass through here. The poll events
9435                 are added directly to self._poll_event_queue.
9436                 In order to avoid endless blocking, this raises
9437                 StopIteration if timeout is None and there are
9438                 no file descriptors to poll.
9439                 """
9440                 if not self._poll_event_handlers:
9441                         self._schedule()
9442                         if timeout is None and \
9443                                 not self._poll_event_handlers:
9444                                 raise StopIteration(
9445                                         "timeout is None and there are no poll() event handlers")
9446
9447                 # The following error is known to occur with Linux kernel versions
9448                 # less than 2.6.24:
9449                 #
9450                 #   select.error: (4, 'Interrupted system call')
9451                 #
9452                 # This error has been observed after a SIGSTOP, followed by SIGCONT.
9453                 # Treat it similar to EAGAIN if timeout is None, otherwise just return
9454                 # without any events.
9455                 while True:
9456                         try:
9457                                 self._poll_event_queue.extend(self._poll_obj.poll(timeout))
9458                                 break
9459                         except select.error, e:
9460                                 writemsg_level("\n!!! select error: %s\n" % (e,),
9461                                         level=logging.ERROR, noiselevel=-1)
9462                                 del e
9463                                 if timeout is not None:
9464                                         break
9465
9466         def _next_poll_event(self, timeout=None):
9467                 """
9468                 Since the _schedule_wait() loop is called by event
9469                 handlers from _poll_loop(), maintain a central event
9470                 queue for both of them to share events from a single
9471                 poll() call. In order to avoid endless blocking, this
9472                 raises StopIteration if timeout is None and there are
9473                 no file descriptors to poll.
9474                 """
9475                 if not self._poll_event_queue:
9476                         self._poll(timeout)
9477                 return self._poll_event_queue.pop()
9478
9479         def _poll_loop(self):
9480
9481                 event_handlers = self._poll_event_handlers
9482                 event_handled = False
9483
9484                 try:
9485                         while event_handlers:
9486                                 f, event = self._next_poll_event()
9487                                 handler, reg_id = event_handlers[f]
9488                                 handler(f, event)
9489                                 event_handled = True
9490                 except StopIteration:
9491                         event_handled = True
9492
9493                 if not event_handled:
9494                         raise AssertionError("tight loop")
9495
9496         def _schedule_yield(self):
9497                 """
9498                 Schedule for a short period of time chosen by the scheduler based
9499                 on internal state. Synchronous tasks should call this periodically
9500                 in order to allow the scheduler to service pending poll events. The
9501                 scheduler will call poll() exactly once, without blocking, and any
9502                 resulting poll events will be serviced.
9503                 """
9504                 event_handlers = self._poll_event_handlers
9505                 events_handled = 0
9506
9507                 if not event_handlers:
9508                         return bool(events_handled)
9509
9510                 if not self._poll_event_queue:
9511                         self._poll(0)
9512
9513                 try:
9514                         while event_handlers and self._poll_event_queue:
9515                                 f, event = self._next_poll_event()
9516                                 handler, reg_id = event_handlers[f]
9517                                 handler(f, event)
9518                                 events_handled += 1
9519                 except StopIteration:
9520                         events_handled += 1
9521
9522                 return bool(events_handled)
9523
9524         def _register(self, f, eventmask, handler):
9525                 """
9526                 @rtype: Integer
9527                 @return: A unique registration id, for use in schedule() or
9528                         unregister() calls.
9529                 """
9530                 if f in self._poll_event_handlers:
9531                         raise AssertionError("fd %d is already registered" % f)
9532                 self._event_handler_id += 1
9533                 reg_id = self._event_handler_id
9534                 self._poll_event_handler_ids[reg_id] = f
9535                 self._poll_event_handlers[f] = (handler, reg_id)
9536                 self._poll_obj.register(f, eventmask)
9537                 return reg_id
9538
9539         def _unregister(self, reg_id):
9540                 f = self._poll_event_handler_ids[reg_id]
9541                 self._poll_obj.unregister(f)
9542                 del self._poll_event_handlers[f]
9543                 del self._poll_event_handler_ids[reg_id]
9544
9545         def _schedule_wait(self, wait_ids):
9546                 """
9547                 Schedule until none of the wait_ids remain registered
9548                 for poll() events.
9549                 @type wait_ids: int or collection of ints
9550                 @param wait_ids: one or more registration ids to wait for
9551                 """
9552                 event_handlers = self._poll_event_handlers
9553                 handler_ids = self._poll_event_handler_ids
9554                 event_handled = False
9555
9556                 if isinstance(wait_ids, int):
9557                         wait_ids = frozenset([wait_ids])
9558
9559                 try:
9560                         while wait_ids.intersection(handler_ids):
9561                                 f, event = self._next_poll_event()
9562                                 handler, reg_id = event_handlers[f]
9563                                 handler(f, event)
9564                                 event_handled = True
9565                 except StopIteration:
9566                         event_handled = True
9567
9568                 return event_handled
9569
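# Illustrative sketch (not part of upstream portage): PollScheduler
# subclasses hand the _register / _schedule_wait / _unregister trio to their
# tasks through a _sched_iface_class instance. A task registers a file
# descriptor with a handler and then lets the scheduler service poll events
# until that registration disappears. The fd and handler arguments are
# placeholders; this helper is never called from module code.
def _example_poll_scheduler_wait(scheduler, fd, handler):
        reg_id = scheduler._register(fd, PollConstants.POLLIN, handler)
        # Blocks in poll() until the handler (or other code) calls
        # _unregister(reg_id), which is how tasks signal completion.
        scheduler._schedule_wait(reg_id)
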
9570 class QueueScheduler(PollScheduler):
9571
9572         """
9573         Add instances of SequentialTaskQueue and then call run(). The
9574         run() method returns when no tasks remain.
9575         """
9576
9577         def __init__(self, max_jobs=None, max_load=None):
9578                 PollScheduler.__init__(self)
9579
9580                 if max_jobs is None:
9581                         max_jobs = 1
9582
9583                 self._max_jobs = max_jobs
9584                 self._max_load = max_load
9585                 self.sched_iface = self._sched_iface_class(
9586                         register=self._register,
9587                         schedule=self._schedule_wait,
9588                         unregister=self._unregister)
9589
9590                 self._queues = []
9591                 self._schedule_listeners = []
9592
9593         def add(self, q):
9594                 self._queues.append(q)
9595
9596         def remove(self, q):
9597                 self._queues.remove(q)
9598
9599         def run(self):
9600
9601                 while self._schedule():
9602                         self._poll_loop()
9603
9604                 while self._running_job_count():
9605                         self._poll_loop()
9606
9607         def _schedule_tasks(self):
9608                 """
9609                 @rtype: bool
9610                 @returns: True if there may be remaining tasks to schedule,
9611                         False otherwise.
9612                 """
9613                 while self._can_add_job():
9614                         n = self._max_jobs - self._running_job_count()
9615                         if n < 1:
9616                                 break
9617
9618                         if not self._start_next_job(n):
9619                                 return False
9620
9621                 for q in self._queues:
9622                         if q:
9623                                 return True
9624                 return False
9625
9626         def _running_job_count(self):
9627                 job_count = 0
9628                 for q in self._queues:
9629                         job_count += len(q.running_tasks)
9630                 self._jobs = job_count
9631                 return job_count
9632
9633         def _start_next_job(self, n=1):
9634                 started_count = 0
9635                 for q in self._queues:
9636                         initial_job_count = len(q.running_tasks)
9637                         q.schedule()
9638                         final_job_count = len(q.running_tasks)
9639                         if final_job_count > initial_job_count:
9640                                 started_count += (final_job_count - initial_job_count)
9641                         if started_count >= n:
9642                                 break
9643                 return started_count
9644
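# Illustrative sketch (not part of upstream portage): QueueScheduler drives
# one or more SequentialTaskQueue instances through the poll loop; run()
# returns once every queue is empty. This helper is never called.
def _example_queue_scheduler(queues, max_jobs=4):
        scheduler = QueueScheduler(max_jobs=max_jobs)
        for q in queues:
                scheduler.add(q)
        scheduler.run()
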
9645 class TaskScheduler(object):
9646
9647         """
9648         A simple way to handle scheduling of AsynchronousTask instances. Simply
9649         add tasks and call run(). The run() method returns when no tasks remain.
9650         """
9651
9652         def __init__(self, max_jobs=None, max_load=None):
9653                 self._queue = SequentialTaskQueue(max_jobs=max_jobs)
9654                 self._scheduler = QueueScheduler(
9655                         max_jobs=max_jobs, max_load=max_load)
9656                 self.sched_iface = self._scheduler.sched_iface
9657                 self.run = self._scheduler.run
9658                 self._scheduler.add(self._queue)
9659
9660         def add(self, task):
9661                 self._queue.add(task)
9662
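# Illustrative sketch (not part of upstream portage): TaskScheduler is the
# convenience wrapper for small batches of asynchronous tasks; add the tasks
# and call run(), which returns when all of them have exited. This helper is
# never called from module code.
def _example_task_scheduler(tasks, max_jobs=2):
        scheduler = TaskScheduler(max_jobs=max_jobs)
        for task in tasks:
                scheduler.add(task)
        scheduler.run()
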
9663 class JobStatusDisplay(object):
9664
9665         _bound_properties = ("curval", "failed", "running")
9666         _jobs_column_width = 48
9667
9668         # Don't update the display unless at least this much
9669         # time has passed, in units of seconds.
9670         _min_display_latency = 2
9671
9672         _default_term_codes = {
9673                 'cr'  : '\r',
9674                 'el'  : '\x1b[K',
9675                 'nel' : '\n',
9676         }
9677
9678         _termcap_name_map = {
9679                 'carriage_return' : 'cr',
9680                 'clr_eol'         : 'el',
9681                 'newline'         : 'nel',
9682         }
9683
9684         def __init__(self, out=sys.stdout, quiet=False):
9685                 object.__setattr__(self, "out", out)
9686                 object.__setattr__(self, "quiet", quiet)
9687                 object.__setattr__(self, "maxval", 0)
9688                 object.__setattr__(self, "merges", 0)
9689                 object.__setattr__(self, "_changed", False)
9690                 object.__setattr__(self, "_displayed", False)
9691                 object.__setattr__(self, "_last_display_time", 0)
9692                 object.__setattr__(self, "width", 80)
9693                 self.reset()
9694
9695                 isatty = hasattr(out, "isatty") and out.isatty()
9696                 object.__setattr__(self, "_isatty", isatty)
9697                 if not isatty or not self._init_term():
9698                         term_codes = {}
9699                         for k, capname in self._termcap_name_map.iteritems():
9700                                 term_codes[k] = self._default_term_codes[capname]
9701                         object.__setattr__(self, "_term_codes", term_codes)
9702                 encoding = sys.getdefaultencoding()
9703                 for k, v in self._term_codes.items():
9704                         if not isinstance(v, str):
9705                                 self._term_codes[k] = v.decode(encoding, 'replace')
9706
9707         def _init_term(self):
9708                 """
9709                 Initialize term control codes.
9710                 @rtype: bool
9711                 @returns: True if term codes were successfully initialized,
9712                         False otherwise.
9713                 """
9714
9715                 term_type = os.environ.get("TERM", "vt100")
9716                 tigetstr = None
9717
9718                 try:
9719                         import curses
9720                         try:
9721                                 curses.setupterm(term_type, self.out.fileno())
9722                                 tigetstr = curses.tigetstr
9723                         except curses.error:
9724                                 pass
9725                 except ImportError:
9726                         pass
9727
9728                 if tigetstr is None:
9729                         return False
9730
9731                 term_codes = {}
9732                 for k, capname in self._termcap_name_map.iteritems():
9733                         code = tigetstr(capname)
9734                         if code is None:
9735                                 code = self._default_term_codes[capname]
9736                         term_codes[k] = code
9737                 object.__setattr__(self, "_term_codes", term_codes)
9738                 return True
9739
9740         def _format_msg(self, msg):
9741                 return ">>> %s" % msg
9742
9743         def _erase(self):
9744                 self.out.write(
9745                         self._term_codes['carriage_return'] + \
9746                         self._term_codes['clr_eol'])
9747                 self.out.flush()
9748                 self._displayed = False
9749
9750         def _display(self, line):
9751                 self.out.write(line)
9752                 self.out.flush()
9753                 self._displayed = True
9754
9755         def _update(self, msg):
9756
9757                 out = self.out
9758                 if not self._isatty:
9759                         out.write(self._format_msg(msg) + self._term_codes['newline'])
9760                         self.out.flush()
9761                         self._displayed = True
9762                         return
9763
9764                 if self._displayed:
9765                         self._erase()
9766
9767                 self._display(self._format_msg(msg))
9768
9769         def displayMessage(self, msg):
9770
9771                 was_displayed = self._displayed
9772
9773                 if self._isatty and self._displayed:
9774                         self._erase()
9775
9776                 self.out.write(self._format_msg(msg) + self._term_codes['newline'])
9777                 self.out.flush()
9778                 self._displayed = False
9779
9780                 if was_displayed:
9781                         self._changed = True
9782                         self.display()
9783
9784         def reset(self):
9785                 self.maxval = 0
9786                 self.merges = 0
9787                 for name in self._bound_properties:
9788                         object.__setattr__(self, name, 0)
9789
9790                 if self._displayed:
9791                         self.out.write(self._term_codes['newline'])
9792                         self.out.flush()
9793                         self._displayed = False
9794
9795         def __setattr__(self, name, value):
9796                 old_value = getattr(self, name)
9797                 if value == old_value:
9798                         return
9799                 object.__setattr__(self, name, value)
9800                 if name in self._bound_properties:
9801                         self._property_change(name, old_value, value)
9802
9803         def _property_change(self, name, old_value, new_value):
9804                 self._changed = True
9805                 self.display()
9806
9807         def _load_avg_str(self):
9808                 try:
9809                         avg = getloadavg()
9810                 except OSError:
9811                         return 'unknown'
9812
9813                 max_avg = max(avg)
9814
9815                 if max_avg < 10:
9816                         digits = 2
9817                 elif max_avg < 100:
9818                         digits = 1
9819                 else:
9820                         digits = 0
9821
9822                 return ", ".join(("%%.%df" % digits ) % x for x in avg)
9823
9824         def display(self):
9825                 """
9826                 Display status on stdout, but only if something has
9827                 changed since the last call.
9828                 """
9829
9830                 if self.quiet:
9831                         return
9832
9833                 current_time = time.time()
9834                 time_delta = current_time - self._last_display_time
9835                 if self._displayed and \
9836                         not self._changed:
9837                         if not self._isatty:
9838                                 return
9839                         if time_delta < self._min_display_latency:
9840                                 return
9841
9842                 self._last_display_time = current_time
9843                 self._changed = False
9844                 self._display_status()
9845
9846         def _display_status(self):
9847                 # Don't use len(self._completed_tasks) here since that also
9848                 # can include uninstall tasks.
9849                 curval_str = str(self.curval)
9850                 maxval_str = str(self.maxval)
9851                 running_str = str(self.running)
9852                 failed_str = str(self.failed)
9853                 load_avg_str = self._load_avg_str()
9854
9855                 color_output = StringIO()
9856                 plain_output = StringIO()
9857                 style_file = portage.output.ConsoleStyleFile(color_output)
9858                 style_file.write_listener = plain_output
9859                 style_writer = portage.output.StyleWriter(file=style_file, maxcol=9999)
9860                 style_writer.style_listener = style_file.new_styles
9861                 f = formatter.AbstractFormatter(style_writer)
9862
9863                 number_style = "INFORM"
9864                 f.add_literal_data("Jobs: ")
9865                 f.push_style(number_style)
9866                 f.add_literal_data(curval_str)
9867                 f.pop_style()
9868                 f.add_literal_data(" of ")
9869                 f.push_style(number_style)
9870                 f.add_literal_data(maxval_str)
9871                 f.pop_style()
9872                 f.add_literal_data(" complete")
9873
9874                 if self.running:
9875                         f.add_literal_data(", ")
9876                         f.push_style(number_style)
9877                         f.add_literal_data(running_str)
9878                         f.pop_style()
9879                         f.add_literal_data(" running")
9880
9881                 if self.failed:
9882                         f.add_literal_data(", ")
9883                         f.push_style(number_style)
9884                         f.add_literal_data(failed_str)
9885                         f.pop_style()
9886                         f.add_literal_data(" failed")
9887
9888                 padding = self._jobs_column_width - len(plain_output.getvalue())
9889                 if padding > 0:
9890                         f.add_literal_data(padding * " ")
9891
9892                 f.add_literal_data("Load avg: ")
9893                 f.add_literal_data(load_avg_str)
9894
9895                 # Truncate to fit width, to avoid making the terminal scroll if the
9896                 # line overflows (happens when the load average is large).
9897                 plain_output = plain_output.getvalue()
9898                 if self._isatty and len(plain_output) > self.width:
9899                         # Use plain_output here since it's easier to truncate
9900                         # properly than the color output which contains console
9901                         # color codes.
9902                         self._update(plain_output[:self.width])
9903                 else:
9904                         self._update(color_output.getvalue())
9905
9906                 xtermTitle(" ".join(plain_output.split()))
9907
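# Illustrative sketch (not part of upstream portage): JobStatusDisplay
# redraws its one-line status whenever a bound property (curval, failed,
# running) changes, so the Scheduler below only assigns to those attributes.
# The package name is made up; this helper is never called from module code.
def _example_job_status_display():
        display = JobStatusDisplay(quiet=False)
        display.maxval = 5
        display.running = 2  # each assignment triggers display()
        display.curval = 1   # e.g. "Jobs: 1 of 5 complete, 2 running ..."
        display.displayMessage("Emerging (1 of 5) sys-apps/example-1.0")
        display.reset()
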
9908 class Scheduler(PollScheduler):
9909
9910         _opts_ignore_blockers = \
9911                 frozenset(["--buildpkgonly",
9912                 "--fetchonly", "--fetch-all-uri",
9913                 "--nodeps", "--pretend"])
9914
9915         _opts_no_background = \
9916                 frozenset(["--pretend",
9917                 "--fetchonly", "--fetch-all-uri"])
9918
9919         _opts_no_restart = frozenset(["--buildpkgonly",
9920                 "--fetchonly", "--fetch-all-uri", "--pretend"])
9921
9922         _bad_resume_opts = set(["--ask", "--changelog",
9923                 "--resume", "--skipfirst"])
9924
9925         _fetch_log = "/var/log/emerge-fetch.log"
9926
9927         class _iface_class(SlotObject):
9928                 __slots__ = ("dblinkEbuildPhase", "dblinkDisplayMerge",
9929                         "dblinkElog", "fetch", "register", "schedule",
9930                         "scheduleSetup", "scheduleUnpack", "scheduleYield",
9931                         "unregister")
9932
9933         class _fetch_iface_class(SlotObject):
9934                 __slots__ = ("log_file", "schedule")
9935
9936         _task_queues_class = slot_dict_class(
9937                 ("merge", "jobs", "fetch", "unpack"), prefix="")
9938
9939         class _build_opts_class(SlotObject):
9940                 __slots__ = ("buildpkg", "buildpkgonly",
9941                         "fetch_all_uri", "fetchonly", "pretend")
9942
9943         class _binpkg_opts_class(SlotObject):
9944                 __slots__ = ("fetchonly", "getbinpkg", "pretend")
9945
9946         class _pkg_count_class(SlotObject):
9947                 __slots__ = ("curval", "maxval")
9948
9949         class _emerge_log_class(SlotObject):
9950                 __slots__ = ("xterm_titles",)
9951
9952                 def log(self, *pargs, **kwargs):
9953                         if not self.xterm_titles:
9954                                 # Avoid interference with the scheduler's status display.
9955                                 kwargs.pop("short_msg", None)
9956                         emergelog(self.xterm_titles, *pargs, **kwargs)
9957
9958         class _failed_pkg(SlotObject):
9959                 __slots__ = ("build_dir", "build_log", "pkg", "returncode")
9960
9961         class _ConfigPool(object):
9962                 """Interface for a task to temporarily allocate a config
9963                 instance from a pool. This allows a task to be constructed
9964                 long before the config instance actually becomes needed, like
9965                 when prefetchers are constructed for the whole merge list."""
9966                 __slots__ = ("_root", "_allocate", "_deallocate")
9967                 def __init__(self, root, allocate, deallocate):
9968                         self._root = root
9969                         self._allocate = allocate
9970                         self._deallocate = deallocate
9971                 def allocate(self):
9972                         return self._allocate(self._root)
9973                 def deallocate(self, settings):
9974                         self._deallocate(settings)
9975
9976         class _unknown_internal_error(portage.exception.PortageException):
9977                 """
9978                 Used internally to terminate scheduling. The specific reason for
9979                 the failure should have been dumped to stderr.
9980                 """
9981                 def __init__(self, value=""):
9982                         portage.exception.PortageException.__init__(self, value)
9983
9984         def __init__(self, settings, trees, mtimedb, myopts,
9985                 spinner, mergelist, favorites, digraph):
9986                 PollScheduler.__init__(self)
9987                 self.settings = settings
9988                 self.target_root = settings["ROOT"]
9989                 self.trees = trees
9990                 self.myopts = myopts
9991                 self._spinner = spinner
9992                 self._mtimedb = mtimedb
9993                 self._mergelist = mergelist
9994                 self._favorites = favorites
9995                 self._args_set = InternalPackageSet(favorites)
9996                 self._build_opts = self._build_opts_class()
9997                 for k in self._build_opts.__slots__:
9998                         setattr(self._build_opts, k, "--" + k.replace("_", "-") in myopts)
9999                 self._binpkg_opts = self._binpkg_opts_class()
10000                 for k in self._binpkg_opts.__slots__:
10001                         setattr(self._binpkg_opts, k, "--" + k.replace("_", "-") in myopts)
10002
10003                 self.curval = 0
10004                 self._logger = self._emerge_log_class()
10005                 self._task_queues = self._task_queues_class()
10006                 for k in self._task_queues.allowed_keys:
10007                         setattr(self._task_queues, k,
10008                                 SequentialTaskQueue())
10009
10010                 # Holds merges that will wait to be executed when no builds are
10011                 # executing. This is useful for system packages since dependencies
10012                 # on system packages are frequently unspecified.
10013                 self._merge_wait_queue = []
10014                 # Holds merges that have been transferred from the merge_wait_queue to
10015                 # the actual merge queue. They are removed from this list upon
10016                 # completion. Other packages can start building only when this list is
10017                 # empty.
10018                 self._merge_wait_scheduled = []
10019
10020                 # Holds system packages and their deep runtime dependencies. Before
10021                 # being merged, these packages go to merge_wait_queue, to be merged
10022                 # when no other packages are building.
10023                 self._deep_system_deps = set()
10024
10025                 # Holds packages to merge which will satisfy currently unsatisfied
10026                 # deep runtime dependencies of system packages. If this is not empty
10027                 # then no parallel builds will be spawned until it is empty. This
10028                 # minimizes the possibility that a build will fail due to the system
10029                 # being in a fragile state. For example, see bug #259954.
10030                 self._unsatisfied_system_deps = set()
10031
10032                 self._status_display = JobStatusDisplay()
10033                 self._max_load = myopts.get("--load-average")
10034                 max_jobs = myopts.get("--jobs")
10035                 if max_jobs is None:
10036                         max_jobs = 1
10037                 self._set_max_jobs(max_jobs)
10038
10039                 # The root where the currently running
10040                 # portage instance is installed.
10041                 self._running_root = trees["/"]["root_config"]
10042                 self.edebug = 0
10043                 if settings.get("PORTAGE_DEBUG", "") == "1":
10044                         self.edebug = 1
10045                 self.pkgsettings = {}
10046                 self._config_pool = {}
10047                 self._blocker_db = {}
10048                 for root in trees:
10049                         self._config_pool[root] = []
10050                         self._blocker_db[root] = BlockerDB(trees[root]["root_config"])
10051
10052                 fetch_iface = self._fetch_iface_class(log_file=self._fetch_log,
10053                         schedule=self._schedule_fetch)
10054                 self._sched_iface = self._iface_class(
10055                         dblinkEbuildPhase=self._dblink_ebuild_phase,
10056                         dblinkDisplayMerge=self._dblink_display_merge,
10057                         dblinkElog=self._dblink_elog,
10058                         fetch=fetch_iface, register=self._register,
10059                         schedule=self._schedule_wait,
10060                         scheduleSetup=self._schedule_setup,
10061                         scheduleUnpack=self._schedule_unpack,
10062                         scheduleYield=self._schedule_yield,
10063                         unregister=self._unregister)
10064
10065                 self._prefetchers = weakref.WeakValueDictionary()
10066                 self._pkg_queue = []
10067                 self._completed_tasks = set()
10068
10069                 self._failed_pkgs = []
10070                 self._failed_pkgs_all = []
10071                 self._failed_pkgs_die_msgs = []
10072                 self._post_mod_echo_msgs = []
10073                 self._parallel_fetch = False
10074                 merge_count = len([x for x in mergelist \
10075                         if isinstance(x, Package) and x.operation == "merge"])
10076                 self._pkg_count = self._pkg_count_class(
10077                         curval=0, maxval=merge_count)
10078                 self._status_display.maxval = self._pkg_count.maxval
10079
10080                 # The load average takes some time to respond when new
10081                 # jobs are added, so we need to limit the rate of adding
10082                 # new jobs.
10083                 self._job_delay_max = 10
10084                 self._job_delay_factor = 1.0
10085                 self._job_delay_exp = 1.5
10086                 self._previous_job_start_time = None
10087
10088                 self._set_digraph(digraph)
10089
10090                 # This is used to memoize the _choose_pkg() result when
10091                 # no packages can be chosen until one of the existing
10092                 # jobs completes.
10093                 self._choose_pkg_return_early = False
10094
10095                 features = self.settings.features
10096                 if "parallel-fetch" in features and \
10097                         not ("--pretend" in self.myopts or \
10098                         "--fetch-all-uri" in self.myopts or \
10099                         "--fetchonly" in self.myopts):
10100                         if "distlocks" not in features:
10101                                 portage.writemsg(red("!!!")+"\n", noiselevel=-1)
10102                                 portage.writemsg(red("!!!")+" parallel-fetching " + \
10103                                         "requires the distlocks feature enabled"+"\n",
10104                                         noiselevel=-1)
10105                                 portage.writemsg(red("!!!")+" you have it disabled, " + \
10106                                         "thus parallel-fetching is being disabled"+"\n",
10107                                         noiselevel=-1)
10108                                 portage.writemsg(red("!!!")+"\n", noiselevel=-1)
10109                         elif len(mergelist) > 1:
10110                                 self._parallel_fetch = True
10111
10112                 if self._parallel_fetch:
10113                         # clear out existing fetch log if it exists
10114                         try:
10115                                 open(self._fetch_log, 'w')
10116                         except EnvironmentError:
10117                                 pass
10118
10119                 self._running_portage = None
10120                 portage_match = self._running_root.trees["vartree"].dbapi.match(
10121                         portage.const.PORTAGE_PACKAGE_ATOM)
10122                 if portage_match:
10123                         cpv = portage_match.pop()
10124                         self._running_portage = self._pkg(cpv, "installed",
10125                                 self._running_root, installed=True)
10126
10127         def _poll(self, timeout=None):
10128                 self._schedule()
10129                 PollScheduler._poll(self, timeout=timeout)
10130
10131         def _set_max_jobs(self, max_jobs):
10132                 self._max_jobs = max_jobs
10133                 self._task_queues.jobs.max_jobs = max_jobs
10134
10135         def _background_mode(self):
10136                 """
10137                 Check if background mode is enabled and adjust states as necessary.
10138
10139                 @rtype: bool
10140                 @returns: True if background mode is enabled, False otherwise.
10141                 """
10142                 background = (self._max_jobs is True or \
10143                         self._max_jobs > 1 or "--quiet" in self.myopts) and \
10144                         not bool(self._opts_no_background.intersection(self.myopts))
10145
10146                 if background:
10147                         interactive_tasks = self._get_interactive_tasks()
10148                         if interactive_tasks:
10149                                 background = False
10150                                 writemsg_level(">>> Sending package output to stdio due " + \
10151                                         "to interactive package(s):\n",
10152                                         level=logging.INFO, noiselevel=-1)
10153                                 msg = [""]
10154                                 for pkg in interactive_tasks:
10155                                         pkg_str = "  " + colorize("INFORM", str(pkg.cpv))
10156                                         if pkg.root != "/":
10157                                                 pkg_str += " for " + pkg.root
10158                                         msg.append(pkg_str)
10159                                 msg.append("")
10160                                 writemsg_level("".join("%s\n" % (l,) for l in msg),
10161                                         level=logging.INFO, noiselevel=-1)
10162                                 if self._max_jobs is True or self._max_jobs > 1:
10163                                         self._set_max_jobs(1)
10164                                         writemsg_level(">>> Setting --jobs=1 due " + \
10165                                                 "to the above interactive package(s)\n",
10166                                                 level=logging.INFO, noiselevel=-1)
10167
10168                 self._status_display.quiet = \
10169                         not background or \
10170                         ("--quiet" in self.myopts and \
10171                         "--verbose" not in self.myopts)
10172
10173                 self._logger.xterm_titles = \
10174                         "notitles" not in self.settings.features and \
10175                         self._status_display.quiet
10176
10177                 return background
10178
10179         def _get_interactive_tasks(self):
10180                 from portage import flatten
10181                 from portage.dep import use_reduce, paren_reduce
10182                 interactive_tasks = []
10183                 for task in self._mergelist:
10184                         if not (isinstance(task, Package) and \
10185                                 task.operation == "merge"):
10186                                 continue
10187                         try:
10188                                 properties = flatten(use_reduce(paren_reduce(
10189                                         task.metadata["PROPERTIES"]), uselist=task.use.enabled))
10190                         except portage.exception.InvalidDependString, e:
10191                                 show_invalid_depstring_notice(task,
10192                                         task.metadata["PROPERTIES"], str(e))
10193                                 raise self._unknown_internal_error()
10194                         if "interactive" in properties:
10195                                 interactive_tasks.append(task)
10196                 return interactive_tasks
10197
10198         def _set_digraph(self, digraph):
10199                 if "--nodeps" in self.myopts or \
10200                         (self._max_jobs is not True and self._max_jobs < 2):
10201                         # save some memory
10202                         self._digraph = None
10203                         return
10204
10205                 self._digraph = digraph
10206                 self._find_system_deps()
10207                 self._prune_digraph()
10208                 self._prevent_builddir_collisions()
10209
10210         def _find_system_deps(self):
10211                 """
10212                 Find system packages and their deep runtime dependencies. Before being
10213                 merged, these packages go to merge_wait_queue, to be merged when no
10214                 other packages are building.
10215                 """
10216                 deep_system_deps = self._deep_system_deps
10217                 deep_system_deps.clear()
10218                 deep_system_deps.update(
10219                         _find_deep_system_runtime_deps(self._digraph))
10220                 deep_system_deps.difference_update([pkg for pkg in \
10221                         deep_system_deps if pkg.operation != "merge"])
10222
10223         def _prune_digraph(self):
10224                 """
10225                 Prune any root nodes that are irrelevant.
10226                 """
10227
10228                 graph = self._digraph
10229                 completed_tasks = self._completed_tasks
10230                 removed_nodes = set()
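                      # Repeat until no irrelevant root nodes remain, since removing
                      # one layer of roots may expose additional irrelevant roots.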
10231                 while True:
10232                         for node in graph.root_nodes():
10233                                 if not isinstance(node, Package) or \
10234                                         (node.installed and node.operation == "nomerge") or \
10235                                         node.onlydeps or \
10236                                         node in completed_tasks:
10237                                         removed_nodes.add(node)
10238                         if removed_nodes:
10239                                 graph.difference_update(removed_nodes)
10240                         if not removed_nodes:
10241                                 break
10242                         removed_nodes.clear()
10243
10244         def _prevent_builddir_collisions(self):
10245                 """
10246                 When building stages, sometimes the same exact cpv needs to be merged
10247                 to both $ROOTs. Add edges to the digraph in order to avoid collisions
10248                 in the builddir. Currently, normal file locks would be inappropriate
10249                 for this purpose since emerge holds all of its build dir locks from
10250                 the main process.
10251                 """
10252                 cpv_map = {}
10253                 for pkg in self._mergelist:
10254                         if not isinstance(pkg, Package):
10255                                 # a satisfied blocker
10256                                 continue
10257                         if pkg.installed:
10258                                 continue
10259                         if pkg.cpv not in cpv_map:
10260                                 cpv_map[pkg.cpv] = [pkg]
10261                                 continue
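                              # Another package with the same cpv is already scheduled;
                              # add buildtime edges so the merges are serialized and
                              # never share the builddir.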
10262                         for earlier_pkg in cpv_map[pkg.cpv]:
10263                                 self._digraph.add(earlier_pkg, pkg,
10264                                         priority=DepPriority(buildtime=True))
10265                         cpv_map[pkg.cpv].append(pkg)
10266
10267         class _pkg_failure(portage.exception.PortageException):
10268                 """
10269                 An instance of this class is raised by unmerge() when
10270                 an uninstallation fails.
10271                 """
10272                 status = 1
10273                 def __init__(self, *pargs):
10274                         portage.exception.PortageException.__init__(self, pargs)
10275                         if pargs:
10276                                 self.status = pargs[0]
10277
10278         def _schedule_fetch(self, fetcher):
10279                 """
10280                 Schedule a fetcher on the fetch queue, in order to
10281                 serialize access to the fetch log.
10282                 """
10283                 self._task_queues.fetch.addFront(fetcher)
10284
10285         def _schedule_setup(self, setup_phase):
10286                 """
10287                 Schedule a setup phase on the merge queue, in order to
10288                 serialize unsandboxed access to the live filesystem.
10289                 """
10290                 self._task_queues.merge.addFront(setup_phase)
10291                 self._schedule()
10292
10293         def _schedule_unpack(self, unpack_phase):
10294                 """
10295                 Schedule an unpack phase on the unpack queue, in order
10296                 to serialize $DISTDIR access for live ebuilds.
10297                 """
10298                 self._task_queues.unpack.add(unpack_phase)
10299
10300         def _find_blockers(self, new_pkg):
10301                 """
10302                 Returns a callable which should be called only when
10303                 the vdb lock has been acquired.
10304                 """
10305                 def get_blockers():
10306                         return self._find_blockers_with_lock(new_pkg, acquire_lock=0)
10307                 return get_blockers
10308
10309         def _find_blockers_with_lock(self, new_pkg, acquire_lock=0):
10310                 if self._opts_ignore_blockers.intersection(self.myopts):
10311                         return None
10312
10313                 # Call gc.collect() here to avoid heap overflow that
10314                 # triggers 'Cannot allocate memory' errors (reported
10315                 # with python-2.5).
10316                 import gc
10317                 gc.collect()
10318
10319                 blocker_db = self._blocker_db[new_pkg.root]
10320
10321                 blocker_dblinks = []
10322                 for blocking_pkg in blocker_db.findInstalledBlockers(
10323                         new_pkg, acquire_lock=acquire_lock):
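                              # An installed package in the same slot (or with an
                              # identical cpv) will be replaced by this merge anyway,
                              # so it is not treated as a blocker here.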
10324                         if new_pkg.slot_atom == blocking_pkg.slot_atom:
10325                                 continue
10326                         if new_pkg.cpv == blocking_pkg.cpv:
10327                                 continue
10328                         blocker_dblinks.append(portage.dblink(
10329                                 blocking_pkg.category, blocking_pkg.pf, blocking_pkg.root,
10330                                 self.pkgsettings[blocking_pkg.root], treetype="vartree",
10331                                 vartree=self.trees[blocking_pkg.root]["vartree"]))
10332
10333                 gc.collect()
10334
10335                 return blocker_dblinks
10336
10337         def _dblink_pkg(self, pkg_dblink):
10338                 cpv = pkg_dblink.mycpv
10339                 type_name = RootConfig.tree_pkg_map[pkg_dblink.treetype]
10340                 root_config = self.trees[pkg_dblink.myroot]["root_config"]
10341                 installed = type_name == "installed"
10342                 return self._pkg(cpv, type_name, root_config, installed=installed)
10343
10344         def _append_to_log_path(self, log_path, msg):
10345                 f = open(log_path, 'a')
10346                 try:
10347                         f.write(msg)
10348                 finally:
10349                         f.close()
10350
10351         def _dblink_elog(self, pkg_dblink, phase, func, msgs):
10352
10353                 log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
10354                 log_file = None
10355                 out = sys.stdout
10356                 background = self._background
10357
10358                 if background and log_path is not None:
10359                         log_file = open(log_path, 'a')
10360                         out = log_file
10361
10362                 try:
10363                         for msg in msgs:
10364                                 func(msg, phase=phase, key=pkg_dblink.mycpv, out=out)
10365                 finally:
10366                         if log_file is not None:
10367                                 log_file.close()
10368
10369         def _dblink_display_merge(self, pkg_dblink, msg, level=0, noiselevel=0):
10370                 log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
10371                 background = self._background
10372
10373                 if log_path is None:
10374                         if not (background and level < logging.WARN):
10375                                 portage.util.writemsg_level(msg,
10376                                         level=level, noiselevel=noiselevel)
10377                 else:
10378                         if not background:
10379                                 portage.util.writemsg_level(msg,
10380                                         level=level, noiselevel=noiselevel)
10381                         self._append_to_log_path(log_path, msg)
10382
10383         def _dblink_ebuild_phase(self,
10384                 pkg_dblink, pkg_dbapi, ebuild_path, phase):
10385                 """
10386                 Using this callback for merge phases allows the scheduler
10387                 to run while these phases execute asynchronously, and allows
10388                 the scheduler to control output handling.
10389                 """
10390
10391                 scheduler = self._sched_iface
10392                 settings = pkg_dblink.settings
10393                 pkg = self._dblink_pkg(pkg_dblink)
10394                 background = self._background
10395                 log_path = settings.get("PORTAGE_LOG_FILE")
10396
10397                 ebuild_phase = EbuildPhase(background=background,
10398                         pkg=pkg, phase=phase, scheduler=scheduler,
10399                         settings=settings, tree=pkg_dblink.treetype)
10400                 ebuild_phase.start()
10401                 ebuild_phase.wait()
10402
10403                 return ebuild_phase.returncode
10404
10405         def _check_manifests(self):
10406                 # Verify all the manifests now so that the user is notified of failure
10407                 # as soon as possible.
10408                 if "strict" not in self.settings.features or \
10409                         "--fetchonly" in self.myopts or \
10410                         "--fetch-all-uri" in self.myopts:
10411                         return os.EX_OK
10412
10413                 shown_verifying_msg = False
10414                 quiet_settings = {}
10415                 for myroot, pkgsettings in self.pkgsettings.iteritems():
10416                         quiet_config = portage.config(clone=pkgsettings)
10417                         quiet_config["PORTAGE_QUIET"] = "1"
10418                         quiet_config.backup_changes("PORTAGE_QUIET")
10419                         quiet_settings[myroot] = quiet_config
10420                         del quiet_config
10421
10422                 for x in self._mergelist:
10423                         if not isinstance(x, Package) or \
10424                                 x.type_name != "ebuild":
10425                                 continue
10426
10427                         if not shown_verifying_msg:
10428                                 shown_verifying_msg = True
10429                                 self._status_msg("Verifying ebuild manifests")
10430
10431                         root_config = x.root_config
10432                         portdb = root_config.trees["porttree"].dbapi
10433                         quiet_config = quiet_settings[root_config.root]
10434                         quiet_config["O"] = os.path.dirname(portdb.findname(x.cpv))
10435                         if not portage.digestcheck([], quiet_config, strict=True):
10436                                 return 1
10437
10438                 return os.EX_OK
10439
10440         def _add_prefetchers(self):
10441
10442                 if not self._parallel_fetch:
10443                         return
10444
10445                 if self._parallel_fetch:
10446                         self._status_msg("Starting parallel fetch")
10447
10448                         prefetchers = self._prefetchers
10449                         getbinpkg = "--getbinpkg" in self.myopts
10450
10451                         # In order to avoid "waiting for lock" messages
10452                         # at the beginning, which annoy users, never
10453                         # spawn a prefetcher for the first package.
10454                         for pkg in self._mergelist[1:]:
10455                                 prefetcher = self._create_prefetcher(pkg)
10456                                 if prefetcher is not None:
10457                                         self._task_queues.fetch.add(prefetcher)
10458                                         prefetchers[pkg] = prefetcher
10459
10460         def _create_prefetcher(self, pkg):
10461                 """
10462                 @return: a prefetcher, or None if not applicable
10463                 """
10464                 prefetcher = None
10465
10466                 if not isinstance(pkg, Package):
10467                         pass
10468
10469                 elif pkg.type_name == "ebuild":
10470
10471                         prefetcher = EbuildFetcher(background=True,
10472                                 config_pool=self._ConfigPool(pkg.root,
10473                                 self._allocate_config, self._deallocate_config),
10474                                 fetchonly=1, logfile=self._fetch_log,
10475                                 pkg=pkg, prefetch=True, scheduler=self._sched_iface)
10476
10477                 elif pkg.type_name == "binary" and \
10478                         "--getbinpkg" in self.myopts and \
10479                         pkg.root_config.trees["bintree"].isremote(pkg.cpv):
10480
10481                         prefetcher = BinpkgPrefetcher(background=True,
10482                                 pkg=pkg, scheduler=self._sched_iface)
10483
10484                 return prefetcher
10485
10486         def _is_restart_scheduled(self):
10487                 """
10488                 Check if the merge list contains a replacement
10489                 for the currently running instance that will result
10490                 in a restart after the merge.
10491                 @rtype: bool
10492                 @returns: True if a restart is scheduled, False otherwise.
10493                 """
10494                 if self._opts_no_restart.intersection(self.myopts):
10495                         return False
10496
10497                 mergelist = self._mergelist
10498
10499                 for i, pkg in enumerate(mergelist):
10500                         if self._is_restart_necessary(pkg) and \
10501                                 i != len(mergelist) - 1:
10502                                 return True
10503
10504                 return False
10505
10506         def _is_restart_necessary(self, pkg):
10507                 """
10508                 @return: True if merging the given package
10509                         requires a restart, False otherwise.
10510                 """
10511
10512                 # Figure out if we need a restart.
10513                 if pkg.root == self._running_root.root and \
10514                         portage.match_from_list(
10515                         portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
10516                         if self._running_portage:
10517                                 return pkg.cpv != self._running_portage.cpv
10518                         return True
10519                 return False
10520
10521         def _restart_if_necessary(self, pkg):
10522                 """
10523                 Use execv() to restart emerge. This happens
10524                 if portage upgrades itself and there are
10525                 remaining packages in the list.
10526                 """
10527
10528                 if self._opts_no_restart.intersection(self.myopts):
10529                         return
10530
10531                 if not self._is_restart_necessary(pkg):
10532                         return
10533
10534                 if pkg == self._mergelist[-1]:
10535                         return
10536
10537                 self._main_loop_cleanup()
10538
10539                 logger = self._logger
10540                 pkg_count = self._pkg_count
10541                 mtimedb = self._mtimedb
10542                 bad_resume_opts = self._bad_resume_opts
10543
10544                 logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
10545                         (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
10546
10547                 logger.log(" *** RESTARTING " + \
10548                         "emerge via exec() after change of " + \
10549                         "portage version.")
10550
10551                 mtimedb["resume"]["mergelist"].remove(list(pkg))
10552                 mtimedb.commit()
10553                 portage.run_exitfuncs()
10554                 mynewargv = [sys.argv[0], "--resume"]
10555                 resume_opts = self.myopts.copy()
10556                 # For automatic resume, we need to prevent
10557                 # any of bad_resume_opts from leaking in
10558                 # via EMERGE_DEFAULT_OPTS.
10559                 resume_opts["--ignore-default-opts"] = True
10560                 for myopt, myarg in resume_opts.iteritems():
10561                         if myopt not in bad_resume_opts:
10562                                 if myarg is True:
10563                                         mynewargv.append(myopt)
10564                                 else:
10565                                         mynewargv.append(myopt +"="+ str(myarg))
10566                 # priority only needs to be adjusted on the first run
10567                 os.environ["PORTAGE_NICENESS"] = "0"
10568                 os.execv(mynewargv[0], mynewargv)
10569
10570         def merge(self):
10571
10572                 if "--resume" in self.myopts:
10573                         # We're resuming.
10574                         portage.writemsg_stdout(
10575                                 colorize("GOOD", "*** Resuming merge...\n"), noiselevel=-1)
10576                         self._logger.log(" *** Resuming merge...")
10577
10578                 self._save_resume_list()
10579
10580                 try:
10581                         self._background = self._background_mode()
10582                 except self._unknown_internal_error:
10583                         return 1
10584
10585                 for root in self.trees:
10586                         root_config = self.trees[root]["root_config"]
10587
10588                         # Even for --pretend --fetch mode, PORTAGE_TMPDIR is required
10589                         # since it might spawn pkg_nofetch, which requires PORTAGE_BUILDDIR
10590                         # for ensuring sane $PWD (bug #239560) and storing elog messages.
10591                         tmpdir = root_config.settings.get("PORTAGE_TMPDIR", "")
10592                         if not tmpdir or not os.path.isdir(tmpdir):
10593                                 msg = "The directory specified in your " + \
10594                                         "PORTAGE_TMPDIR variable, '%s', " % tmpdir + \
10595                                         "does not exist. Please create this " + \
10596                                         "directory or correct your PORTAGE_TMPDIR setting."
10597                                 msg = textwrap.wrap(msg, 70)
10598                                 out = portage.output.EOutput()
10599                                 for l in msg:
10600                                         out.eerror(l)
10601                                 return 1
10602
10603                         if self._background:
10604                                 root_config.settings.unlock()
10605                                 root_config.settings["PORTAGE_BACKGROUND"] = "1"
10606                                 root_config.settings.backup_changes("PORTAGE_BACKGROUND")
10607                                 root_config.settings.lock()
10608
10609                         self.pkgsettings[root] = portage.config(
10610                                 clone=root_config.settings)
10611
10612                 rval = self._check_manifests()
10613                 if rval != os.EX_OK:
10614                         return rval
10615
10616                 keep_going = "--keep-going" in self.myopts
10617                 fetchonly = self._build_opts.fetchonly
10618                 mtimedb = self._mtimedb
10619                 failed_pkgs = self._failed_pkgs
10620
10621                 while True:
10622                         rval = self._merge()
10623                         if rval == os.EX_OK or fetchonly or not keep_going:
10624                                 break
10625                         if "resume" not in mtimedb:
10626                                 break
10627                         mergelist = self._mtimedb["resume"].get("mergelist")
10628                         if not mergelist:
10629                                 break
10630
10631                         if not failed_pkgs:
10632                                 break
10633
10634                         for failed_pkg in failed_pkgs:
10635                                 mergelist.remove(list(failed_pkg.pkg))
10636
10637                         self._failed_pkgs_all.extend(failed_pkgs)
10638                         del failed_pkgs[:]
10639
10640                         if not mergelist:
10641                                 break
10642
10643                         if not self._calc_resume_list():
10644                                 break
10645
10646                         clear_caches(self.trees)
10647                         if not self._mergelist:
10648                                 break
10649
10650                         self._save_resume_list()
10651                         self._pkg_count.curval = 0
10652                         self._pkg_count.maxval = len([x for x in self._mergelist \
10653                                 if isinstance(x, Package) and x.operation == "merge"])
10654                         self._status_display.maxval = self._pkg_count.maxval
10655
10656                 self._logger.log(" *** Finished. Cleaning up...")
10657
10658                 if failed_pkgs:
10659                         self._failed_pkgs_all.extend(failed_pkgs)
10660                         del failed_pkgs[:]
10661
10662                 background = self._background
10663                 failure_log_shown = False
10664                 if background and len(self._failed_pkgs_all) == 1:
10665                         # If only one package failed then just show its
10666                         # whole log for easy viewing.
10667                         failed_pkg = self._failed_pkgs_all[-1]
10668                         build_dir = failed_pkg.build_dir
10669                         log_file = None
10670
10671                         log_paths = [failed_pkg.build_log]
10672
10673                         log_path = self._locate_failure_log(failed_pkg)
10674                         if log_path is not None:
10675                                 try:
10676                                         log_file = open(log_path)
10677                                 except IOError:
10678                                         pass
10679
10680                         if log_file is not None:
10681                                 try:
10682                                         for line in log_file:
10683                                                 writemsg_level(line, noiselevel=-1)
10684                                 finally:
10685                                         log_file.close()
10686                                 failure_log_shown = True
10687
10688                 # Dump mod_echo output now since it tends to flood the terminal.
10689                 # This prevents more important output, generated later, from
10690                 # being swept away by the mod_echo output.
10691                 mod_echo_output = _flush_elog_mod_echo()
10692
10693                 if background and not failure_log_shown and \
10694                         self._failed_pkgs_all and \
10695                         self._failed_pkgs_die_msgs and \
10696                         not mod_echo_output:
10697
10698                         printer = portage.output.EOutput()
10699                         for mysettings, key, logentries in self._failed_pkgs_die_msgs:
10700                                 root_msg = ""
10701                                 if mysettings["ROOT"] != "/":
10702                                         root_msg = " merged to %s" % mysettings["ROOT"]
10703                                 print
10704                                 printer.einfo("Error messages for package %s%s:" % \
10705                                         (colorize("INFORM", key), root_msg))
10706                                 print
10707                                 for phase in portage.const.EBUILD_PHASES:
10708                                         if phase not in logentries:
10709                                                 continue
10710                                         for msgtype, msgcontent in logentries[phase]:
10711                                                 if isinstance(msgcontent, basestring):
10712                                                         msgcontent = [msgcontent]
10713                                                 for line in msgcontent:
10714                                                         printer.eerror(line.strip("\n"))
10715
10716                 if self._post_mod_echo_msgs:
10717                         for msg in self._post_mod_echo_msgs:
10718                                 msg()
10719
10720                 if len(self._failed_pkgs_all) > 1 or \
10721                         (self._failed_pkgs_all and "--keep-going" in self.myopts):
10722                         if len(self._failed_pkgs_all) > 1:
10723                                 msg = "The following %d packages have " % \
10724                                         len(self._failed_pkgs_all) + \
10725                                         "failed to build or install:"
10726                         else:
10727                                 msg = "The following package has " + \
10728                                         "failed to build or install:"
10729                         prefix = bad(" * ")
10730                         writemsg(prefix + "\n", noiselevel=-1)
10731                         from textwrap import wrap
10732                         for line in wrap(msg, 72):
10733                                 writemsg("%s%s\n" % (prefix, line), noiselevel=-1)
10734                         writemsg(prefix + "\n", noiselevel=-1)
10735                         for failed_pkg in self._failed_pkgs_all:
10736                                 writemsg("%s\t%s\n" % (prefix,
10737                                         colorize("INFORM", str(failed_pkg.pkg))),
10738                                         noiselevel=-1)
10739                         writemsg(prefix + "\n", noiselevel=-1)
10740
10741                 return rval
10742
10743         def _elog_listener(self, mysettings, key, logentries, fulltext):
10744                 errors = portage.elog.filter_loglevels(logentries, ["ERROR"])
10745                 if errors:
10746                         self._failed_pkgs_die_msgs.append(
10747                                 (mysettings, key, errors))
10748
10749         def _locate_failure_log(self, failed_pkg):
10750
10751                 build_dir = failed_pkg.build_dir
10752                 log_file = None
10753
10754                 log_paths = [failed_pkg.build_log]
10755
10756                 for log_path in log_paths:
10757                         if not log_path:
10758                                 continue
10759
10760                         try:
10761                                 log_size = os.stat(log_path).st_size
10762                         except OSError:
10763                                 continue
10764
10765                         if log_size == 0:
10766                                 continue
10767
10768                         return log_path
10769
10770                 return None
10771
10772         def _add_packages(self):
10773                 pkg_queue = self._pkg_queue
10774                 for pkg in self._mergelist:
10775                         if isinstance(pkg, Package):
10776                                 pkg_queue.append(pkg)
10777                         elif isinstance(pkg, Blocker):
10778                                 pass
10779
10780         def _system_merge_started(self, merge):
10781                 """
10782                 Add any unsatisfied runtime deps to self._unsatisfied_system_deps.
10783                 """
10784                 graph = self._digraph
10785                 if graph is None:
10786                         return
10787                 pkg = merge.merge.pkg
10788
10789                 # Skip this if $ROOT != / since it shouldn't matter if there
10790                 # are unsatisfied system runtime deps in this case.
10791                 if pkg.root != '/':
10792                         return
10793
10794                 completed_tasks = self._completed_tasks
10795                 unsatisfied = self._unsatisfied_system_deps
10796
10797                 def ignore_non_runtime_or_satisfied(priority):
10798                         """
10799                         Ignore non-runtime and satisfied runtime priorities.
10800                         """
10801                         if isinstance(priority, DepPriority) and \
10802                                 not priority.satisfied and \
10803                                 (priority.runtime or priority.runtime_post):
10804                                 return False
10805                         return True
10806
10807                 # When checking for unsatisfied runtime deps, only check
10808                 # direct deps since indirect deps are checked when the
10809                 # corresponding parent is merged.
10810                 for child in graph.child_nodes(pkg,
10811                         ignore_priority=ignore_non_runtime_or_satisfied):
10812                         if not isinstance(child, Package) or \
10813                                 child.operation == 'uninstall':
10814                                 continue
10815                         if child is pkg:
10816                                 continue
10817                         if child.operation == 'merge' and \
10818                                 child not in completed_tasks:
10819                                 unsatisfied.add(child)
10820
10821         def _merge_wait_exit_handler(self, task):
10822                 self._merge_wait_scheduled.remove(task)
10823                 self._merge_exit(task)
10824
10825         def _merge_exit(self, merge):
10826                 self._do_merge_exit(merge)
10827                 self._deallocate_config(merge.merge.settings)
10828                 if merge.returncode == os.EX_OK and \
10829                         not merge.merge.pkg.installed:
10830                         self._status_display.curval += 1
10831                 self._status_display.merges = len(self._task_queues.merge)
10832                 self._schedule()
10833
10834         def _do_merge_exit(self, merge):
10835                 pkg = merge.merge.pkg
10836                 if merge.returncode != os.EX_OK:
10837                         settings = merge.merge.settings
10838                         build_dir = settings.get("PORTAGE_BUILDDIR")
10839                         build_log = settings.get("PORTAGE_LOG_FILE")
10840
10841                         self._failed_pkgs.append(self._failed_pkg(
10842                                 build_dir=build_dir, build_log=build_log,
10843                                 pkg=pkg,
10844                                 returncode=merge.returncode))
10845                         self._failed_pkg_msg(self._failed_pkgs[-1], "install", "to")
10846
10847                         self._status_display.failed = len(self._failed_pkgs)
10848                         return
10849
10850                 self._task_complete(pkg)
10851                 pkg_to_replace = merge.merge.pkg_to_replace
10852                 if pkg_to_replace is not None:
10853                         # When a package is replaced, mark its uninstall
10854                         # task complete (if any).
10855                         uninst_hash_key = \
10856                                 ("installed", pkg.root, pkg_to_replace.cpv, "uninstall")
10857                         self._task_complete(uninst_hash_key)
10858
10859                 if pkg.installed:
10860                         return
10861
10862                 self._restart_if_necessary(pkg)
10863
10864                 # Call mtimedb.commit() after each merge so that
10865                 # --resume still works after being interrupted
10866                 # by reboot, sigkill or similar.
10867                 mtimedb = self._mtimedb
10868                 mtimedb["resume"]["mergelist"].remove(list(pkg))
10869                 if not mtimedb["resume"]["mergelist"]:
10870                         del mtimedb["resume"]
10871                 mtimedb.commit()
10872
10873         def _build_exit(self, build):
10874                 if build.returncode == os.EX_OK:
10875                         self.curval += 1
10876                         merge = PackageMerge(merge=build)
10877                         if not build.build_opts.buildpkgonly and \
10878                                 build.pkg in self._deep_system_deps:
10879                                 # Since dependencies on system packages are frequently
10880                                 # unspecified, merge them only when no builds are executing.
10881                                 self._merge_wait_queue.append(merge)
10882                                 merge.addStartListener(self._system_merge_started)
10883                         else:
10884                                 merge.addExitListener(self._merge_exit)
10885                                 self._task_queues.merge.add(merge)
10886                                 self._status_display.merges = len(self._task_queues.merge)
10887                 else:
10888                         settings = build.settings
10889                         build_dir = settings.get("PORTAGE_BUILDDIR")
10890                         build_log = settings.get("PORTAGE_LOG_FILE")
10891
10892                         self._failed_pkgs.append(self._failed_pkg(
10893                                 build_dir=build_dir, build_log=build_log,
10894                                 pkg=build.pkg,
10895                                 returncode=build.returncode))
10896                         self._failed_pkg_msg(self._failed_pkgs[-1], "emerge", "for")
10897
10898                         self._status_display.failed = len(self._failed_pkgs)
10899                         self._deallocate_config(build.settings)
10900                 self._jobs -= 1
10901                 self._status_display.running = self._jobs
10902                 self._schedule()
10903
10904         def _extract_exit(self, build):
10905                 self._build_exit(build)
10906
10907         def _task_complete(self, pkg):
10908                 self._completed_tasks.add(pkg)
10909                 self._unsatisfied_system_deps.discard(pkg)
10910                 self._choose_pkg_return_early = False
10911
10912         def _merge(self):
10913
10914                 self._add_prefetchers()
10915                 self._add_packages()
10916                 pkg_queue = self._pkg_queue
10917                 failed_pkgs = self._failed_pkgs
10918                 portage.locks._quiet = self._background
10919                 portage.elog._emerge_elog_listener = self._elog_listener
10920                 rval = os.EX_OK
10921
10922                 try:
10923                         self._main_loop()
10924                 finally:
10925                         self._main_loop_cleanup()
10926                         portage.locks._quiet = False
10927                         portage.elog._emerge_elog_listener = None
10928                         if failed_pkgs:
10929                                 rval = failed_pkgs[-1].returncode
10930
10931                 return rval
10932
10933         def _main_loop_cleanup(self):
10934                 del self._pkg_queue[:]
10935                 self._completed_tasks.clear()
10936                 self._deep_system_deps.clear()
10937                 self._unsatisfied_system_deps.clear()
10938                 self._choose_pkg_return_early = False
10939                 self._status_display.reset()
10940                 self._digraph = None
10941                 self._task_queues.fetch.clear()
10942
10943         def _choose_pkg(self):
10944                 """
10945                 Choose a task that has all its dependencies satisfied.
10946                 """
10947
10948                 if self._choose_pkg_return_early:
10949                         return None
10950
10951                 if self._digraph is None:
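                              # Without a digraph there is no dependency information,
                              # so only pop the next package when nothing else is
                              # running, unless --nodeps with parallel jobs allows
                              # packages to be merged in any order.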
10952                         if (self._jobs or self._task_queues.merge) and \
10953                                 not ("--nodeps" in self.myopts and \
10954                                 (self._max_jobs is True or self._max_jobs > 1)):
10955                                 self._choose_pkg_return_early = True
10956                                 return None
10957                         return self._pkg_queue.pop(0)
10958
10959                 if not (self._jobs or self._task_queues.merge):
10960                         return self._pkg_queue.pop(0)
10961
10962                 self._prune_digraph()
10963
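                      # Choose the first queued package whose deep dependencies
                      # contain no scheduled merges; packages queued after the
                      # candidate are ignored since they merge later anyway.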
10964                 chosen_pkg = None
10965                 later = set(self._pkg_queue)
10966                 for pkg in self._pkg_queue:
10967                         later.remove(pkg)
10968                         if not self._dependent_on_scheduled_merges(pkg, later):
10969                                 chosen_pkg = pkg
10970                                 break
10971
10972                 if chosen_pkg is not None:
10973                         self._pkg_queue.remove(chosen_pkg)
10974
10975                 if chosen_pkg is None:
10976                         # There's no point in searching for a package to
10977                         # choose until at least one of the existing jobs
10978                         # completes.
10979                         self._choose_pkg_return_early = True
10980
10981                 return chosen_pkg
10982
10983         def _dependent_on_scheduled_merges(self, pkg, later):
10984                 """
10985                 Traverse the subgraph of the given package's deep dependencies
10986                 to see if it contains any scheduled merges.
10987                 @param pkg: a package to check dependencies for
10988                 @type pkg: Package
10989                 @param later: packages for which dependence should be ignored
10990                         since they will be merged later than pkg anyway and therefore
10991                         delaying the merge of pkg will not result in a more optimal
10992                         merge order
10993                 @type later: set
10994                 @rtype: bool
10995                 @returns: True if the package is dependent, False otherwise.
10996                 """
10997
10998                 graph = self._digraph
10999                 completed_tasks = self._completed_tasks
11000
11001                 dependent = False
11002                 traversed_nodes = set([pkg])
11003                 direct_deps = graph.child_nodes(pkg)
11004                 node_stack = direct_deps
11005                 direct_deps = frozenset(direct_deps)
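                      # Depth-first traversal of deep dependencies: any node that is
                      # not installed/nomerge, not an uninstall outside the direct
                      # deps, not completed, and not scheduled later makes pkg
                      # dependent on a scheduled merge.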
11006                 while node_stack:
11007                         node = node_stack.pop()
11008                         if node in traversed_nodes:
11009                                 continue
11010                         traversed_nodes.add(node)
11011                         if not ((node.installed and node.operation == "nomerge") or \
11012                                 (node.operation == "uninstall" and \
11013                                 node not in direct_deps) or \
11014                                 node in completed_tasks or \
11015                                 node in later):
11016                                 dependent = True
11017                                 break
11018                         node_stack.extend(graph.child_nodes(node))
11019
11020                 return dependent
11021
11022         def _allocate_config(self, root):
11023                 """
11024                 Allocate a unique config instance for a task in order
11025                 to prevent interference between parallel tasks.
11026                 """
11027                 if self._config_pool[root]:
11028                         temp_settings = self._config_pool[root].pop()
11029                 else:
11030                         temp_settings = portage.config(clone=self.pkgsettings[root])
11031                 # Since config.setcpv() isn't guaranteed to call config.reset() for
11032                 # performance reasons, call it here to make sure all settings from the
11033                 # previous package get flushed out (such as PORTAGE_LOG_FILE).
11034                 temp_settings.reload()
11035                 temp_settings.reset()
11036                 return temp_settings
11037
11038         def _deallocate_config(self, settings):
11039                 self._config_pool[settings["ROOT"]].append(settings)
11040
11041         def _main_loop(self):
11042
11043                 # Only allow 1 job max if a restart is scheduled
11044                 # due to a portage update.
11045                 if self._is_restart_scheduled() or \
11046                         self._opts_no_background.intersection(self.myopts):
11047                         self._set_max_jobs(1)
11048
11049                 merge_queue = self._task_queues.merge
11050
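                      # Schedule and poll until there is nothing left to schedule,
                      # then keep polling until all running jobs and queued merges
                      # have finished.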
11051                 while self._schedule():
11052                         if self._poll_event_handlers:
11053                                 self._poll_loop()
11054
11055                 while True:
11056                         self._schedule()
11057                         if not (self._jobs or merge_queue):
11058                                 break
11059                         if self._poll_event_handlers:
11060                                 self._poll_loop()
11061
11062         def _keep_scheduling(self):
11063                 return bool(self._pkg_queue and \
11064                         not (self._failed_pkgs and not self._build_opts.fetchonly))
11065
11066         def _schedule_tasks(self):
11067
11068                 # When the number of jobs drops to zero, process all waiting merges.
11069                 if not self._jobs and self._merge_wait_queue:
11070                         for task in self._merge_wait_queue:
11071                                 task.addExitListener(self._merge_wait_exit_handler)
11072                                 self._task_queues.merge.add(task)
11073                         self._status_display.merges = len(self._task_queues.merge)
11074                         self._merge_wait_scheduled.extend(self._merge_wait_queue)
11075                         del self._merge_wait_queue[:]
11076
11077                 self._schedule_tasks_imp()
11078                 self._status_display.display()
11079
11080                 state_change = 0
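                      # Give every task queue a chance to start work and count how
                      # many of them changed state.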
11081                 for q in self._task_queues.values():
11082                         if q.schedule():
11083                                 state_change += 1
11084
11085                 # Cancel prefetchers if they're the only reason
11086                 # the main poll loop is still running.
11087                 if self._failed_pkgs and not self._build_opts.fetchonly and \
11088                         not (self._jobs or self._task_queues.merge) and \
11089                         self._task_queues.fetch:
11090                         self._task_queues.fetch.clear()
11091                         state_change += 1
11092
11093                 if state_change:
11094                         self._schedule_tasks_imp()
11095                         self._status_display.display()
11096
11097                 return self._keep_scheduling()
11098
11099         def _job_delay(self):
11100                 """
11101                 @rtype: bool
11102                 @returns: True if job scheduling should be delayed, False otherwise.
11103                 """
11104
11105                 if self._jobs and self._max_load is not None:
11106
11107                         current_time = time.time()
11108
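                              # The delay grows with the number of running jobs
                              # (jobs ** exp, scaled by the delay factor) and is capped
                              # at the configured maximum, so jobs are not spawned in
                              # rapid bursts while load limiting is in effect.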
11109                         delay = self._job_delay_factor * self._jobs ** self._job_delay_exp
11110                         if delay > self._job_delay_max:
11111                                 delay = self._job_delay_max
11112                         if (current_time - self._previous_job_start_time) < delay:
11113                                 return True
11114
11115                 return False
11116
11117         def _schedule_tasks_imp(self):
11118                 """
11119                 @rtype: bool
11120                 @returns: True if state changed, False otherwise.
11121                 """
11122
11123                 state_change = 0
11124
11125                 while True:
11126
11127                         if not self._keep_scheduling():
11128                                 return bool(state_change)
11129
11130                         if self._choose_pkg_return_early or \
11131                                 self._merge_wait_scheduled or \
11132                                 (self._jobs and self._unsatisfied_system_deps) or \
11133                                 not self._can_add_job() or \
11134                                 self._job_delay():
11135                                 return bool(state_change)
11136
11137                         pkg = self._choose_pkg()
11138                         if pkg is None:
11139                                 return bool(state_change)
11140
11141                         state_change += 1
11142
11143                         if not pkg.installed:
11144                                 self._pkg_count.curval += 1
11145
11146                         task = self._task(pkg)
11147
11148                         if pkg.installed:
11149                                 merge = PackageMerge(merge=task)
11150                                 merge.addExitListener(self._merge_exit)
11151                                 self._task_queues.merge.add(merge)
11152
11153                         elif pkg.built:
11154                                 self._jobs += 1
11155                                 self._previous_job_start_time = time.time()
11156                                 self._status_display.running = self._jobs
11157                                 task.addExitListener(self._extract_exit)
11158                                 self._task_queues.jobs.add(task)
11159
11160                         else:
11161                                 self._jobs += 1
11162                                 self._previous_job_start_time = time.time()
11163                                 self._status_display.running = self._jobs
11164                                 task.addExitListener(self._build_exit)
11165                                 self._task_queues.jobs.add(task)
11166
11167                 return bool(state_change)
11168
11169         def _task(self, pkg):
11170
11171                 pkg_to_replace = None
11172                 if pkg.operation != "uninstall":
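                              # Look up the package currently installed in the same
                              # slot, if any; that is the package this merge replaces.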
11173                         vardb = pkg.root_config.trees["vartree"].dbapi
11174                         previous_cpv = vardb.match(pkg.slot_atom)
11175                         if previous_cpv:
11176                                 previous_cpv = previous_cpv.pop()
11177                                 pkg_to_replace = self._pkg(previous_cpv,
11178                                         "installed", pkg.root_config, installed=True)
11179
11180                 task = MergeListItem(args_set=self._args_set,
11181                         background=self._background, binpkg_opts=self._binpkg_opts,
11182                         build_opts=self._build_opts,
11183                         config_pool=self._ConfigPool(pkg.root,
11184                         self._allocate_config, self._deallocate_config),
11185                         emerge_opts=self.myopts,
11186                         find_blockers=self._find_blockers(pkg), logger=self._logger,
11187                         mtimedb=self._mtimedb, pkg=pkg, pkg_count=self._pkg_count.copy(),
11188                         pkg_to_replace=pkg_to_replace,
11189                         prefetcher=self._prefetchers.get(pkg),
11190                         scheduler=self._sched_iface,
11191                         settings=self._allocate_config(pkg.root),
11192                         statusMessage=self._status_msg,
11193                         world_atom=self._world_atom)
11194
11195                 return task
11196
11197         def _failed_pkg_msg(self, failed_pkg, action, preposition):
11198                 pkg = failed_pkg.pkg
11199                 msg = "%s to %s %s" % \
11200                         (bad("Failed"), action, colorize("INFORM", pkg.cpv))
11201                 if pkg.root != "/":
11202                         msg += " %s %s" % (preposition, pkg.root)
11203
11204                 log_path = self._locate_failure_log(failed_pkg)
11205                 if log_path is not None:
11206                         msg += ", Log file:"
11207                 self._status_msg(msg)
11208
11209                 if log_path is not None:
11210                         self._status_msg(" '%s'" % (colorize("INFORM", log_path),))
11211
11212         def _status_msg(self, msg):
11213                 """
11214                 Display a brief status message (no newlines) in the status display.
11215                 This is called by tasks to provide feedback to the user. It
11216                 delegates to the status display the responsibility of generating
11217                 \r and \n control characters, to guarantee that lines are created
11218                 or erased when necessary and appropriate.
11219
11220                 @type msg: str
11221                 @param msg: a brief status message (no newlines allowed)
11222                 """
11223                 if not self._background:
11224                         writemsg_level("\n")
11225                 self._status_display.displayMessage(msg)
11226
11227         def _save_resume_list(self):
11228                 """
11229                 Do this before verifying the ebuild Manifests since it might
11230                 be possible for the user to use --resume --skipfirst to get past
11231                 a non-essential package with a broken digest.
11232                 """
11233                 mtimedb = self._mtimedb
11234                 mtimedb["resume"]["mergelist"] = [list(x) \
11235                         for x in self._mergelist \
11236                         if isinstance(x, Package) and x.operation == "merge"]
11237
11238                 mtimedb.commit()
11239
11240         def _calc_resume_list(self):
11241                 """
11242                 Use the current resume list to calculate a new one,
11243                 dropping any packages with unsatisfied deps.
11244                 @rtype: bool
11245                 @returns: True if successful, False otherwise.
11246                 """
11247                 print colorize("GOOD", "*** Resuming merge...")
11248
11249                 if self._show_list():
11250                         if "--tree" in self.myopts:
11251                                 portage.writemsg_stdout("\n" + \
11252                                         darkgreen("These are the packages that " + \
11253                                         "would be merged, in reverse order:\n\n"))
11254
11255                         else:
11256                                 portage.writemsg_stdout("\n" + \
11257                                         darkgreen("These are the packages that " + \
11258                                         "would be merged, in order:\n\n"))
11259
11260                 show_spinner = "--quiet" not in self.myopts and \
11261                         "--nodeps" not in self.myopts
11262
11263                 if show_spinner:
11264                         print "Calculating dependencies  ",
11265
11266                 myparams = create_depgraph_params(self.myopts, None)
11267                 success = False
11268                 e = None
11269                 try:
11270                         success, mydepgraph, dropped_tasks = resume_depgraph(
11271                                 self.settings, self.trees, self._mtimedb, self.myopts,
11272                                 myparams, self._spinner)
11273                 except depgraph.UnsatisfiedResumeDep, exc:
11274                         # rename variable to avoid python-3.0 error:
11275                         # SyntaxError: can not delete variable 'e' referenced in nested
11276                         #              scope
11277                         e = exc
11278                         mydepgraph = e.depgraph
11279                         dropped_tasks = set()
11280
11281                 if show_spinner:
11282                         print "\b\b... done!"
11283
11284                 if e is not None:
11285                         def unsatisfied_resume_dep_msg():
11286                                 mydepgraph.display_problems()
11287                                 out = portage.output.EOutput()
11288                                 out.eerror("One or more packages are either masked or " + \
11289                                         "have missing dependencies:")
11290                                 out.eerror("")
11291                                 indent = "  "
11292                                 show_parents = set()
11293                                 for dep in e.value:
11294                                         if dep.parent in show_parents:
11295                                                 continue
11296                                         show_parents.add(dep.parent)
11297                                         if dep.atom is None:
11298                                                 out.eerror(indent + "Masked package:")
11299                                                 out.eerror(2 * indent + str(dep.parent))
11300                                                 out.eerror("")
11301                                         else:
11302                                                 out.eerror(indent + str(dep.atom) + " pulled in by:")
11303                                                 out.eerror(2 * indent + str(dep.parent))
11304                                                 out.eerror("")
11305                                 msg = "The resume list contains packages " + \
11306                                         "that are either masked or have " + \
11307                                         "unsatisfied dependencies. " + \
11308                                         "Please restart/continue " + \
11309                                         "the operation manually, or use --skipfirst " + \
11310                                         "to skip the first package in the list and " + \
11311                                         "any other packages that may be " + \
11312                                         "masked or have missing dependencies."
11313                                 for line in textwrap.wrap(msg, 72):
11314                                         out.eerror(line)
11315                         self._post_mod_echo_msgs.append(unsatisfied_resume_dep_msg)
11316                         return False
11317
11318                 if success and self._show_list():
11319                         mylist = mydepgraph.altlist()
11320                         if mylist:
11321                                 if "--tree" in self.myopts:
11322                                         mylist.reverse()
11323                                 mydepgraph.display(mylist, favorites=self._favorites)
11324
11325                 if not success:
11326                         self._post_mod_echo_msgs.append(mydepgraph.display_problems)
11327                         return False
11328                 mydepgraph.display_problems()
11329
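                      # Grab the final merge list and break references that lead back
                      # into the depgraph, so the depgraph itself can be garbage
                      # collected before merging begins.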
11330                 mylist = mydepgraph.altlist()
11331                 mydepgraph.break_refs(mylist)
11332                 mydepgraph.break_refs(dropped_tasks)
11333                 self._mergelist = mylist
11334                 self._set_digraph(mydepgraph.schedulerGraph())
11335
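                      # Report any merge tasks that were dropped from the resume list
                      # due to unsatisfied dependencies, and record each of them as a
                      # failed package.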
11336                 msg_width = 75
11337                 for task in dropped_tasks:
11338                         if not (isinstance(task, Package) and task.operation == "merge"):
11339                                 continue
11340                         pkg = task
11341                         msg = "emerge --keep-going:" + \
11342                                 " %s" % (pkg.cpv,)
11343                         if pkg.root != "/":
11344                                 msg += " for %s" % (pkg.root,)
11345                         msg += " dropped due to unsatisfied dependency."
11346                         for line in textwrap.wrap(msg, msg_width):
11347                                 eerror(line, phase="other", key=pkg.cpv)
11348                         settings = self.pkgsettings[pkg.root]
11349                         # Ensure that log collection from $T is disabled inside
11350                         # elog_process(), since any logs that might exist are
11351                         # not valid here.
11352                         settings.pop("T", None)
11353                         portage.elog.elog_process(pkg.cpv, settings)
11354                         self._failed_pkgs_all.append(self._failed_pkg(pkg=pkg))
11355
11356                 return True
11357
11358         def _show_list(self):
11359                 myopts = self.myopts
11360                 if "--quiet" not in myopts and \
11361                         ("--ask" in myopts or "--tree" in myopts or \
11362                         "--verbose" in myopts):
11363                         return True
11364                 return False
11365
11366         def _world_atom(self, pkg):
11367                 """
11368                 Add the package to the world file, but only if
11369                 it's supposed to be added. Otherwise, do nothing.
11370                 """
11371
11372                 if set(("--buildpkgonly", "--fetchonly",
11373                         "--fetch-all-uri",
11374                         "--oneshot", "--onlydeps",
11375                         "--pretend")).intersection(self.myopts):
11376                         return
11377
11378                 if pkg.root != self.target_root:
11379                         return
11380
11381                 args_set = self._args_set
11382                 if not args_set.findAtomForPackage(pkg):
11383                         return
11384
11385                 logger = self._logger
11386                 pkg_count = self._pkg_count
11387                 root_config = pkg.root_config
11388                 world_set = root_config.sets["world"]
11389                 world_locked = False
11390                 if hasattr(world_set, "lock"):
11391                         world_set.lock()
11392                         world_locked = True
11393
11394                 try:
11395                         if hasattr(world_set, "load"):
11396                                 world_set.load() # maybe it's changed on disk
11397
11398                         atom = create_world_atom(pkg, args_set, root_config)
11399                         if atom:
11400                                 if hasattr(world_set, "add"):
11401                                         self._status_msg(('Recording %s in "world" ' + \
11402                                                 'favorites file...') % atom)
11403                                         logger.log(" === (%s of %s) Updating world file (%s)" % \
11404                                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv))
11405                                         world_set.add(atom)
11406                                 else:
11407                                         writemsg_level('\n!!! Unable to record %s in "world"\n' % \
11408                                                 (atom,), level=logging.WARN, noiselevel=-1)
11409                 finally:
11410                         if world_locked:
11411                                 world_set.unlock()
11412
11413         def _pkg(self, cpv, type_name, root_config, installed=False):
11414                 """
11415                 Get a package instance from the cache, or create a new
11416                 one if necessary. Raises KeyError from aux_get if it
11417                 fails for some reason (package does not exist or is
11418                 corrupt).
11419                 """
11420                 operation = "merge"
11421                 if installed:
11422                         operation = "nomerge"
11423
11424                 if self._digraph is not None:
11425                         # Reuse existing instance when available.
11426                         pkg = self._digraph.get(
11427                                 (type_name, root_config.root, cpv, operation))
11428                         if pkg is not None:
11429                                 return pkg
11430
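                      # Not cached in the digraph, so build a new Package instance,
                      # fetching only the metadata keys tracked by the aux cache.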
11431                 tree_type = depgraph.pkg_tree_map[type_name]
11432                 db = root_config.trees[tree_type].dbapi
11433                 db_keys = list(self.trees[root_config.root][
11434                         tree_type].dbapi._aux_cache_keys)
11435                 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
11436                 pkg = Package(cpv=cpv, metadata=metadata,
11437                         root_config=root_config, installed=installed)
11438                 if type_name == "ebuild":
11439                         settings = self.pkgsettings[root_config.root]
11440                         settings.setcpv(pkg)
11441                         pkg.metadata["USE"] = settings["PORTAGE_USE"]
11442
11443                 return pkg
11444
11445 class MetadataRegen(PollScheduler):
11446
11447         def __init__(self, portdb, max_jobs=None, max_load=None):
11448                 PollScheduler.__init__(self)
11449                 self._portdb = portdb
11450
11451                 if max_jobs is None:
11452                         max_jobs = 1
11453
11454                 self._max_jobs = max_jobs
11455                 self._max_load = max_load
11456                 self._sched_iface = self._sched_iface_class(
11457                         register=self._register,
11458                         schedule=self._schedule_wait,
11459                         unregister=self._unregister)
11460
11461                 self._valid_pkgs = set()
11462                 self._process_iter = self._iter_metadata_processes()
11463                 self.returncode = os.EX_OK
11464                 self._error_count = 0
11465
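              # Yields one metadata regeneration process per ebuild that needs its
              # cache entry (re)generated; ebuilds for which _metadata_process()
              # returns None (typically because the cached entry is still valid)
              # are skipped.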
11466         def _iter_metadata_processes(self):
11467                 portdb = self._portdb
11468                 valid_pkgs = self._valid_pkgs
11469                 every_cp = portdb.cp_all()
11470                 every_cp.sort(reverse=True)
11471
11472                 while every_cp:
11473                         cp = every_cp.pop()
11474                         portage.writemsg_stdout("Processing %s\n" % cp)
11475                         cpv_list = portdb.cp_list(cp)
11476                         for cpv in cpv_list:
11477                                 valid_pkgs.add(cpv)
11478                                 ebuild_path, repo_path = portdb.findname2(cpv)
11479                                 metadata_process = portdb._metadata_process(
11480                                         cpv, ebuild_path, repo_path)
11481                                 if metadata_process is None:
11482                                         continue
11483                                 yield metadata_process
11484
11485         def run(self):
11486
11487                 portdb = self._portdb
11488                 from portage.cache.cache_errors import CacheError
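                      # Snapshot the keys currently present in each tree's metadata
                      # cache; after regeneration, any key that is not backed by an
                      # existing ebuild is pruned from auxdb below.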
11489                 dead_nodes = {}
11490
11491                 for mytree in portdb.porttrees:
11492                         try:
11493                                 dead_nodes[mytree] = set(portdb.auxdb[mytree].iterkeys())
11494                         except CacheError, e:
11495                                 portage.writemsg("Error listing cache entries for " + \
11496                                         "'%s': %s, continuing...\n" % (mytree, e), noiselevel=-1)
11497                                 del e
11498                                 dead_nodes = None
11499                                 break
11500
11501                 while self._schedule():
11502                         self._poll_loop()
11503
11504                 while self._jobs:
11505                         self._poll_loop()
11506
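                      # Prune stale cache entries: keys whose ebuilds still exist are
                      # discarded from the dead set, and whatever remains is deleted
                      # from each tree's auxdb.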
11507                 if dead_nodes:
11508                         for y in self._valid_pkgs:
11509                                 for mytree in portdb.porttrees:
11510                                         if portdb.findname2(y, mytree=mytree)[0]:
11511                                                 dead_nodes[mytree].discard(y)
11512
11513                         for mytree, nodes in dead_nodes.iteritems():
11514                                 auxdb = portdb.auxdb[mytree]
11515                                 for y in nodes:
11516                                         try:
11517                                                 del auxdb[y]
11518                                         except (KeyError, CacheError):
11519                                                 pass
11520
11521         def _schedule_tasks(self):
11522                 """
11523                 @rtype: bool
11524                 @returns: True if there may be remaining tasks to schedule,
11525                         False otherwise.
11526                 """
11527                 while self._can_add_job():
11528                         try:
11529                                 metadata_process = self._process_iter.next()
11530                         except StopIteration:
11531                                 return False
11532
11533                         self._jobs += 1
11534                         metadata_process.scheduler = self._sched_iface
11535                         metadata_process.addExitListener(self._metadata_exit)
11536                         metadata_process.start()
11537                 return True
11538
11539         def _metadata_exit(self, metadata_process):
11540                 self._jobs -= 1
11541                 if metadata_process.returncode != os.EX_OK:
11542                         self.returncode = 1
11543                         self._error_count += 1
11544                         self._valid_pkgs.discard(metadata_process.cpv)
11545                         portage.writemsg("Error processing %s, continuing...\n" % \
11546                                 (metadata_process.cpv,))
11547                 self._schedule()
11548
11549 class UninstallFailure(portage.exception.PortageException):
11550         """
11551         An instance of this class is raised by unmerge() when
11552         an uninstallation fails.
11553         """
11554         status = 1
11555         def __init__(self, *pargs):
11556                 portage.exception.PortageException.__init__(self, pargs)
11557                 if pargs:
11558                         self.status = pargs[0]
11559
11560 def unmerge(root_config, myopts, unmerge_action,
11561         unmerge_files, ldpath_mtimes, autoclean=0,
11562         clean_world=1, clean_delay=1, ordered=0, raise_on_error=0,
11563         scheduler=None, writemsg_level=portage.util.writemsg_level):
11564
11565         quiet = "--quiet" in myopts
11566         settings = root_config.settings
11567         sets = root_config.sets
11568         vartree = root_config.trees["vartree"]
11569         candidate_catpkgs=[]
11570         global_unmerge=0
11571         xterm_titles = "notitles" not in settings.features
11572         out = portage.output.EOutput()
11573         pkg_cache = {}
11574         db_keys = list(vartree.dbapi._aux_cache_keys)
11575
11576         def _pkg(cpv):
11577                 pkg = pkg_cache.get(cpv)
11578                 if pkg is None:
11579                         pkg = Package(cpv=cpv, installed=True,
11580                                 metadata=izip(db_keys, vartree.dbapi.aux_get(cpv, db_keys)),
11581                                 root_config=root_config,
11582                                 type_name="installed")
11583                         pkg_cache[cpv] = pkg
11584                 return pkg
11585
11586         vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
11587         try:
11588                 # At least the parent needs to exist for the lock file.
11589                 portage.util.ensure_dirs(vdb_path)
11590         except portage.exception.PortageException:
11591                 pass
11592         vdb_lock = None
11593         try:
11594                 if os.access(vdb_path, os.W_OK):
11595                         vdb_lock = portage.locks.lockdir(vdb_path)
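                      # Build the list of system-profile package keys used for the
                      # "part of your system profile" warning below; a virtual only
                      # contributes its provider when exactly one installed package
                      # provides it.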
11596                 realsyslist = sets["system"].getAtoms()
11597                 syslist = []
11598                 for x in realsyslist:
11599                         mycp = portage.dep_getkey(x)
11600                         if mycp in settings.getvirtuals():
11601                                 providers = []
11602                                 for provider in settings.getvirtuals()[mycp]:
11603                                         if vartree.dbapi.match(provider):
11604                                                 providers.append(provider)
11605                                 if len(providers) == 1:
11606                                         syslist.extend(providers)
11607                         else:
11608                                 syslist.append(mycp)
11609         
11610                 mysettings = portage.config(clone=settings)
11611         
11612                 if not unmerge_files:
11613                         if unmerge_action == "unmerge":
11614                                 print
11615                                 print bold("emerge unmerge") + " can only be used with specific package names"
11616                                 print
11617                                 return 0
11618                         else:
11619                                 global_unmerge = 1
11620         
11621                 localtree = vartree
11622                 # process all arguments and add all
11623                 # valid db entries to candidate_catpkgs
11624                 if global_unmerge:
11625                         if not unmerge_files:
11626                                 candidate_catpkgs.extend(vartree.dbapi.cp_all())
11627                 else:
11628                         #we've got command-line arguments
11629                         if not unmerge_files:
11630                                 print "\nNo packages to unmerge have been provided.\n"
11631                                 return 0
11632                         for x in unmerge_files:
11633                                 arg_parts = x.split('/')
11634                                 if x[0] not in [".","/"] and \
11635                                         arg_parts[-1][-7:] != ".ebuild":
11636                                         #possible cat/pkg or dep; treat as such
11637                                         candidate_catpkgs.append(x)
11638                                 elif unmerge_action in ["prune","clean"]:
11639                                         print "\n!!! Prune and clean do not accept individual" + \
11640                                                 " ebuilds as arguments;\n    skipping.\n"
11641                                         continue
11642                                 else:
11643                                         # it appears that the user is specifying an installed
11644                                         # ebuild and we're in "unmerge" mode, so it's ok.
11645                                         if not os.path.exists(x):
11646                                                 print "\n!!! The path '"+x+"' doesn't exist.\n"
11647                                                 return 0
11648         
11649                                         absx   = os.path.abspath(x)
11650                                         sp_absx = absx.split("/")
11651                                         if sp_absx[-1][-7:] == ".ebuild":
11652                                                 del sp_absx[-1]
11653                                                 absx = "/".join(sp_absx)
11654         
11655                                         sp_absx_len = len(sp_absx)
11656         
11657                                         vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
11658                                         vdb_len  = len(vdb_path)
11659         
11660                                         sp_vdb     = vdb_path.split("/")
11661                                         sp_vdb_len = len(sp_vdb)
11662         
11663                                         if not os.path.exists(absx+"/CONTENTS"):
11664                                                 print "!!! Not a valid db dir: "+str(absx)
11665                                                 return 0
11666         
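                                              # The given path must resolve to a directory inside
                                              # the vdb; its components below the vdb root are
                                              # turned into an =cat/pkg-version atom below.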
11667                                         if sp_absx_len <= sp_vdb_len:
11668                                                 # The Path is shorter... so it can't be inside the vdb.
11669                                                 print sp_absx
11670                                                 print absx
11671                                                 print "\n!!!",x,"cannot be inside "+ \
11672                                                         vdb_path+"; aborting.\n"
11673                                                 return 0
11674         
11675                                         for idx in range(0,sp_vdb_len):
11676                                                 if idx >= sp_absx_len or sp_vdb[idx] != sp_absx[idx]:
11677                                                         print sp_absx
11678                                                         print absx
11679                                                         print "\n!!!", x, "is not inside "+\
11680                                                                 vdb_path+"; aborting.\n"
11681                                                         return 0
11682         
11683                                         print "="+"/".join(sp_absx[sp_vdb_len:])
11684                                         candidate_catpkgs.append(
11685                                                 "="+"/".join(sp_absx[sp_vdb_len:]))
11686         
11687                 newline=""
11688                 if (not "--quiet" in myopts):
11689                         newline="\n"
11690                 if settings["ROOT"] != "/":
11691                         writemsg_level(darkgreen(newline+ \
11692                                 ">>> Using system located in ROOT tree %s\n" % \
11693                                 settings["ROOT"]))
11694
11695                 if (("--pretend" in myopts) or ("--ask" in myopts)) and \
11696                         not ("--quiet" in myopts):
11697                         writemsg_level(darkgreen(newline+\
11698                                 ">>> These are the packages that would be unmerged:\n"))
11699
11700                 # Preservation of order is required for --depclean and --prune so
11701                 # that dependencies are respected. Use all_selected to eliminate
11702                 # duplicate packages since the same package may be selected by
11703                 # multiple atoms.
11704                 pkgmap = []
11705                 all_selected = set()
11706                 for x in candidate_catpkgs:
11707                         # cycle through all our candidate deps and determine
11708                         # what will and will not get unmerged
11709                         try:
11710                                 mymatch = vartree.dbapi.match(x)
11711                         except portage.exception.AmbiguousPackageName, errpkgs:
11712                                 print "\n\n!!! The short ebuild name \"" + \
11713                                         x + "\" is ambiguous.  Please specify"
11714                                 print "!!! one of the following fully-qualified " + \
11715                                         "ebuild names instead:\n"
11716                                 for i in errpkgs[0]:
11717                                         print "    " + green(i)
11718                                 print
11719                                 sys.exit(1)
11720         
11721                         if not mymatch and x[0] not in "<>=~":
11722                                 mymatch = localtree.dep_match(x)
11723                         if not mymatch:
11724                                 portage.writemsg("\n--- Couldn't find '%s' to %s.\n" % \
11725                                         (x, unmerge_action), noiselevel=-1)
11726                                 continue
11727
11728                         pkgmap.append(
11729                                 {"protected": set(), "selected": set(), "omitted": set()})
11730                         mykey = len(pkgmap) - 1
11731                         if unmerge_action=="unmerge":
11732                                         for y in mymatch:
11733                                                 if y not in all_selected:
11734                                                         pkgmap[mykey]["selected"].add(y)
11735                                                         all_selected.add(y)
11736                         elif unmerge_action == "prune":
11737                                 if len(mymatch) == 1:
11738                                         continue
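                                      # --prune protects only the single best installed version
                                      # and selects every other version for removal; on a slot
                                      # tie, the higher counter (most recently installed) wins.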
11739                                 best_version = mymatch[0]
11740                                 best_slot = vartree.getslot(best_version)
11741                                 best_counter = vartree.dbapi.cpv_counter(best_version)
11742                                 for mypkg in mymatch[1:]:
11743                                         myslot = vartree.getslot(mypkg)
11744                                         mycounter = vartree.dbapi.cpv_counter(mypkg)
11745                                         if (myslot == best_slot and mycounter > best_counter) or \
11746                                                 mypkg == portage.best([mypkg, best_version]):
11747                                                 if myslot == best_slot:
11748                                                         if mycounter < best_counter:
11749                                                                 # On slot collision, keep the one with the
11750                                                                 # highest counter since it is the most
11751                                                                 # recently installed.
11752                                                                 continue
11753                                                 best_version = mypkg
11754                                                 best_slot = myslot
11755                                                 best_counter = mycounter
11756                                 pkgmap[mykey]["protected"].add(best_version)
11757                                 pkgmap[mykey]["selected"].update(mypkg for mypkg in mymatch \
11758                                         if mypkg != best_version and mypkg not in all_selected)
11759                                 all_selected.update(pkgmap[mykey]["selected"])
11760                         else:
11761                                 # unmerge_action == "clean"
11762                                 slotmap={}
11763                                 for mypkg in mymatch:
11764                                         if unmerge_action == "clean":
11765                                                 myslot = localtree.getslot(mypkg)
11766                                         else:
11767                                                 # since we're pruning, we don't care about slots
11768                                                 # and put all the pkgs in together
11769                                                 myslot = 0
11770                                         if myslot not in slotmap:
11771                                                 slotmap[myslot] = {}
11772                                         slotmap[myslot][localtree.dbapi.cpv_counter(mypkg)] = mypkg
11773
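                                      # Also add every installed version of this cp to the
                                      # slotmap so that the highest counter in each slot is
                                      # protected below, even if it did not match the atom.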
11774                                 for mypkg in vartree.dbapi.cp_list(
11775                                         portage.dep_getkey(mymatch[0])):
11776                                         myslot = vartree.getslot(mypkg)
11777                                         if myslot not in slotmap:
11778                                                 slotmap[myslot] = {}
11779                                         slotmap[myslot][vartree.dbapi.cpv_counter(mypkg)] = mypkg
11780
11781                                 for myslot in slotmap:
11782                                         counterkeys = slotmap[myslot].keys()
11783                                         if not counterkeys:
11784                                                 continue
11785                                         counterkeys.sort()
11786                                         pkgmap[mykey]["protected"].add(
11787                                                 slotmap[myslot][counterkeys[-1]])
11788                                         del counterkeys[-1]
11789
11790                                         for counter in counterkeys[:]:
11791                                                 mypkg = slotmap[myslot][counter]
11792                                                 if mypkg not in mymatch:
11793                                                         counterkeys.remove(counter)
11794                                                         pkgmap[mykey]["protected"].add(
11795                                                                 slotmap[myslot][counter])
11796
11797                                         #be pretty and get them in order of merge:
11798                                         for ckey in counterkeys:
11799                                                 mypkg = slotmap[myslot][ckey]
11800                                                 if mypkg not in all_selected:
11801                                                         pkgmap[mykey]["selected"].add(mypkg)
11802                                                         all_selected.add(mypkg)
11803                                         # ok, now the last-merged package
11804                                         # is protected, and the rest are selected
11805                 numselected = len(all_selected)
11806                 if global_unmerge and not numselected:
11807                         portage.writemsg_stdout("\n>>> No outdated packages were found on your system.\n")
11808                         return 0
11809         
11810                 if not numselected:
11811                         portage.writemsg_stdout(
11812                                 "\n>>> No packages selected for removal by " + \
11813                                 unmerge_action + "\n")
11814                         return 0
11815         finally:
11816                 if vdb_lock:
11817                         vartree.dbapi.flush_cache()
11818                         portage.locks.unlockdir(vdb_lock)
11819         
11820         from portage.sets.base import EditablePackageSet
11821         
11822         # generate a list of package sets that are directly or indirectly listed in "world",
11823         # as there is no persistent list of "installed" sets
11824         installed_sets = ["world"]
11825         stop = False
11826         pos = 0
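              # Iterate to a fixed point, following nested @set references until no
              # new set names are discovered.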
11827         while not stop:
11828                 stop = True
11829                 pos = len(installed_sets)
11830                 for s in installed_sets[pos - 1:]:
11831                         if s not in sets:
11832                                 continue
11833                         candidates = [x[len(SETPREFIX):] for x in sets[s].getNonAtoms() if x.startswith(SETPREFIX)]
11834                         if candidates:
11835                                 stop = False
11836                                 installed_sets += candidates
11837         installed_sets = [x for x in installed_sets if x not in root_config.setconfig.active]
11838         del stop, pos
11839
11840         # we don't want to unmerge packages that are still listed in user-editable package sets
11841         # We don't want to unmerge packages that are still referenced by user-editable
11842         # package sets reachable from "world", since they would simply be re-merged on
11843         # the next update of "world" or of the relevant package sets.
11844         for cp in xrange(len(pkgmap)):
11845                 for cpv in pkgmap[cp]["selected"].copy():
11846                         try:
11847                                 pkg = _pkg(cpv)
11848                         except KeyError:
11849                                 # It could have been uninstalled
11850                                 # by a concurrent process.
11851                                 continue
11852
11853                         if unmerge_action != "clean" and \
11854                                 root_config.root == "/" and \
11855                                 portage.match_from_list(
11856                                 portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
11857                                 msg = ("Not unmerging package %s since there is no valid " + \
11858                                 "reason for portage to unmerge itself.") % (pkg.cpv,)
11859                                 for line in textwrap.wrap(msg, 75):
11860                                         out.eerror(line)
11861                                 # adjust pkgmap so the display output is correct
11862                                 pkgmap[cp]["selected"].remove(cpv)
11863                                 all_selected.remove(cpv)
11864                                 pkgmap[cp]["protected"].add(cpv)
11865                                 continue
11866
11867                         parents = []
11868                         for s in installed_sets:
11869                                 # skip sets that the user requested to unmerge, and skip world 
11870                                 # unless we're unmerging a package set (as the package would be 
11871                                 # removed from "world" later on)
11872                                 if s in root_config.setconfig.active or (s == "world" and not root_config.setconfig.active):
11873                                         continue
11874
11875                                 if s not in sets:
11876                                         if s in unknown_sets:
11877                                                 continue
11878                                         unknown_sets.add(s)
11879                                         out = portage.output.EOutput()
11880                                         out.eerror(("Unknown set '@%s' in " + \
11881                                                 "%svar/lib/portage/world_sets") % \
11882                                                 (s, root_config.root))
11883                                         continue
11884
11885                                 # only check instances of EditablePackageSet as other classes are generally used for
11886                                 # special purposes and can be ignored here (and are usually generated dynamically, so the
11887                                 # user can't do much about them anyway)
11888                                 if isinstance(sets[s], EditablePackageSet):
11889
11890                                         # This is derived from a snippet of code in the
11891                                         # depgraph._iter_atoms_for_pkg() method.
11892                                         for atom in sets[s].iterAtomsForPackage(pkg):
11893                                                 inst_matches = vartree.dbapi.match(atom)
11894                                                 inst_matches.reverse() # descending order
11895                                                 higher_slot = None
11896                                                 for inst_cpv in inst_matches:
11897                                                         try:
11898                                                                 inst_pkg = _pkg(inst_cpv)
11899                                                         except KeyError:
11900                                                                 # It could have been uninstalled
11901                                                                 # by a concurrent process.
11902                                                                 continue
11903
11904                                                         if inst_pkg.cp != atom.cp:
11905                                                                 continue
11906                                                         if pkg >= inst_pkg:
11907                                                                 # This is descending order, and we're not
11908                                                                 # interested in any versions <= pkg given.
11909                                                                 break
11910                                                         if pkg.slot_atom != inst_pkg.slot_atom:
11911                                                                 higher_slot = inst_pkg
11912                                                                 break
11913                                                 if higher_slot is None:
11914                                                         parents.append(s)
11915                                                         break
11916                         if parents:
11917                                 #print colorize("WARN", "Package %s is going to be unmerged," % cpv)
11918                                 #print colorize("WARN", "but still listed in the following package sets:")
11919                                 #print "    %s\n" % ", ".join(parents)
11920                                 print colorize("WARN", "Not unmerging package %s as it is" % cpv)
11921                                 print colorize("WARN", "still referenced by the following package sets:")
11922                                 print "    %s\n" % ", ".join(parents)
11923                                 # adjust pkgmap so the display output is correct
11924                                 pkgmap[cp]["selected"].remove(cpv)
11925                                 all_selected.remove(cpv)
11926                                 pkgmap[cp]["protected"].add(cpv)
11927         
11928         del installed_sets
11929
11930         numselected = len(all_selected)
11931         if not numselected:
11932                 writemsg_level(
11933                         "\n>>> No packages selected for removal by " + \
11934                         unmerge_action + "\n")
11935                 return 0
11936
11937         # Unmerge order only matters in some cases
11938         if not ordered:
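                      # Collapse all entries that share the same category/package into
                      # a single pkgmap entry and sort by cp for a stable display.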
11939                 unordered = {}
11940                 for d in pkgmap:
11941                         selected = d["selected"]
11942                         if not selected:
11943                                 continue
11944                         cp = portage.cpv_getkey(iter(selected).next())
11945                         cp_dict = unordered.get(cp)
11946                         if cp_dict is None:
11947                                 cp_dict = {}
11948                                 unordered[cp] = cp_dict
11949                                 for k in d:
11950                                         cp_dict[k] = set()
11951                         for k, v in d.iteritems():
11952                                 cp_dict[k].update(v)
11953                 pkgmap = [unordered[cp] for cp in sorted(unordered)]
11954
11955         for x in xrange(len(pkgmap)):
11956                 selected = pkgmap[x]["selected"]
11957                 if not selected:
11958                         continue
11959                 for mytype, mylist in pkgmap[x].iteritems():
11960                         if mytype == "selected":
11961                                 continue
11962                         mylist.difference_update(all_selected)
11963                 cp = portage.cpv_getkey(iter(selected).next())
11964                 for y in localtree.dep_match(cp):
11965                         if y not in pkgmap[x]["omitted"] and \
11966                                 y not in pkgmap[x]["selected"] and \
11967                                 y not in pkgmap[x]["protected"] and \
11968                                 y not in all_selected:
11969                                 pkgmap[x]["omitted"].add(y)
11970                 if global_unmerge and not pkgmap[x]["selected"]:
11971                         #avoid cluttering the preview printout with stuff that isn't getting unmerged
11972                         continue
11973                 if not (pkgmap[x]["protected"] or pkgmap[x]["omitted"]) and cp in syslist:
11974                         writemsg_level(colorize("BAD","\a\n\n!!! " + \
11975                                 "'%s' is part of your system profile.\n" % cp),
11976                                 level=logging.WARNING, noiselevel=-1)
11977                         writemsg_level(colorize("WARN","\a!!! Unmerging it may " + \
11978                                 "be damaging to your system.\n\n"),
11979                                 level=logging.WARNING, noiselevel=-1)
11980                         if clean_delay and "--pretend" not in myopts and "--ask" not in myopts:
11981                                 countdown(int(settings["EMERGE_WARNING_DELAY"]),
11982                                         colorize("UNMERGE_WARN", "Press Ctrl-C to Stop"))
11983                 if not quiet:
11984                         writemsg_level("\n %s\n" % (bold(cp),), noiselevel=-1)
11985                 else:
11986                         writemsg_level(bold(cp) + ": ", noiselevel=-1)
11987                 for mytype in ["selected","protected","omitted"]:
11988                         if not quiet:
11989                                 writemsg_level((mytype + ": ").rjust(14), noiselevel=-1)
11990                         if pkgmap[x][mytype]:
11991                                 sorted_pkgs = [portage.catpkgsplit(mypkg)[1:] for mypkg in pkgmap[x][mytype]]
11992                                 sorted_pkgs.sort(key=cmp_sort_key(portage.pkgcmp))
11993                                 for pn, ver, rev in sorted_pkgs:
11994                                         if rev == "r0":
11995                                                 myversion = ver
11996                                         else:
11997                                                 myversion = ver + "-" + rev
11998                                         if mytype == "selected":
11999                                                 writemsg_level(
12000                                                         colorize("UNMERGE_WARN", myversion + " "),
12001                                                         noiselevel=-1)
12002                                         else:
12003                                                 writemsg_level(
12004                                                         colorize("GOOD", myversion + " "), noiselevel=-1)
12005                         else:
12006                                 writemsg_level("none ", noiselevel=-1)
12007                         if not quiet:
12008                                 writemsg_level("\n", noiselevel=-1)
12009                 if quiet:
12010                         writemsg_level("\n", noiselevel=-1)
12011
12012         writemsg_level("\n>>> " + colorize("UNMERGE_WARN", "'Selected'") + \
12013                 " packages are slated for removal.\n")
12014         writemsg_level(">>> " + colorize("GOOD", "'Protected'") + \
12015                         " and " + colorize("GOOD", "'omitted'") + \
12016                         " packages will not be removed.\n\n")
12017
12018         if "--pretend" in myopts:
12019                 #we're done... return
12020                 return 0
12021         if "--ask" in myopts:
12022                 if userquery("Would you like to unmerge these packages?")=="No":
12023                         # enter pretend mode for correct formatting of results
12024                         myopts["--pretend"] = True
12025                         print
12026                         print "Quitting."
12027                         print
12028                         return 0
12029         #the real unmerging begins, after a short delay....
12030         if clean_delay and not autoclean:
12031                 countdown(int(settings["CLEAN_DELAY"]), ">>> Unmerging")
12032
12033         for x in xrange(len(pkgmap)):
12034                 for y in pkgmap[x]["selected"]:
12035                         writemsg_level(">>> Unmerging "+y+"...\n", noiselevel=-1)
12036                         emergelog(xterm_titles, "=== Unmerging... ("+y+")")
12037                         mysplit = y.split("/")
12038                         #unmerge...
12039                         retval = portage.unmerge(mysplit[0], mysplit[1], settings["ROOT"],
12040                                 mysettings, unmerge_action not in ["clean","prune"],
12041                                 vartree=vartree, ldpath_mtimes=ldpath_mtimes,
12042                                 scheduler=scheduler)
12043
12044                         if retval != os.EX_OK:
12045                                 emergelog(xterm_titles, " !!! unmerge FAILURE: "+y)
12046                                 if raise_on_error:
12047                                         raise UninstallFailure(retval)
12048                                 sys.exit(retval)
12049                         else:
12050                                 if clean_world and hasattr(sets["world"], "cleanPackage"):
12051                                         sets["world"].cleanPackage(vartree.dbapi, y)
12052                                 emergelog(xterm_titles, " >>> unmerge success: "+y)
12053         if clean_world and hasattr(sets["world"], "remove"):
12054                 for s in root_config.setconfig.active:
12055                         sets["world"].remove(SETPREFIX+s)
12056         return 1
12057
12058 def chk_updated_info_files(root, infodirs, prev_mtimes, retval):
12059
12060         if os.path.exists("/usr/bin/install-info"):
12061                 out = portage.output.EOutput()
12062                 regen_infodirs=[]
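                      # Only regenerate the dir index for info directories whose mtime
                      # changed since the last recorded run.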
12063                 for z in infodirs:
12064                         if z=='':
12065                                 continue
12066                         inforoot=normpath(root+z)
12067                         if os.path.isdir(inforoot):
12068                                 infomtime = long(os.stat(inforoot).st_mtime)
12069                                 if inforoot not in prev_mtimes or \
12070                                         prev_mtimes[inforoot] != infomtime:
12071                                                 regen_infodirs.append(inforoot)
12072
12073                 if not regen_infodirs:
12074                         portage.writemsg_stdout("\n")
12075                         out.einfo("GNU info directory index is up-to-date.")
12076                 else:
12077                         portage.writemsg_stdout("\n")
12078                         out.einfo("Regenerating GNU info directory index...")
12079
12080                         dir_extensions = ("", ".gz", ".bz2")
12081                         icount=0
12082                         badcount=0
12083                         errmsg = ""
12084                         for inforoot in regen_infodirs:
12085                                 if inforoot=='':
12086                                         continue
12087
12088                                 if not os.path.isdir(inforoot) or \
12089                                         not os.access(inforoot, os.W_OK):
12090                                         continue
12091
12092                                 file_list = os.listdir(inforoot)
12093                                 file_list.sort()
12094                                 dir_file = os.path.join(inforoot, "dir")
12095                                 moved_old_dir = False
12096                                 processed_count = 0
12097                                 for x in file_list:
12098                                         if x.startswith(".") or \
12099                                                 os.path.isdir(os.path.join(inforoot, x)):
12100                                                 continue
12101                                         if x.startswith("dir"):
12102                                                 skip = False
12103                                                 for ext in dir_extensions:
12104                                                         if x == "dir" + ext or \
12105                                                                 x == "dir" + ext + ".old":
12106                                                                 skip = True
12107                                                                 break
12108                                                 if skip:
12109                                                         continue
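                                              # Before the first regular info file is processed,
                                              # move any existing dir index files aside so that
                                              # install-info rebuilds the index from scratch.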
12110                                         if processed_count == 0:
12111                                                 for ext in dir_extensions:
12112                                                         try:
12113                                                                 os.rename(dir_file + ext, dir_file + ext + ".old")
12114                                                                 moved_old_dir = True
12115                                                         except EnvironmentError, e:
12116                                                                 if e.errno != errno.ENOENT:
12117                                                                         raise
12118                                                                 del e
12119                                         processed_count += 1
12120                                         myso=commands.getstatusoutput("LANG=C LANGUAGE=C /usr/bin/install-info --dir-file="+inforoot+"/dir "+inforoot+"/"+x)[1]
12121                                         existsstr="already exists, for file `"
12122                                         if myso!="":
12123                                                 if re.search(existsstr,myso):
12124                                                         # Already exists... Don't increment the count for this.
12125                                                         pass
12126                                                 elif myso[:44]=="install-info: warning: no info dir entry in ":
12127                                                         # This info file doesn't contain a DIR-header: install-info produces this
12128                                                         # (harmless) warning (the --quiet switch doesn't seem to work).
12129                                                         # Don't increment the count for this.
12130                                                         pass
12131                                                 else:
12132                                                         badcount=badcount+1
12133                                                         errmsg += myso + "\n"
12134                                         icount=icount+1
12135
12136                                 if moved_old_dir and not os.path.exists(dir_file):
12137                                         # We didn't generate a new dir file, so put the old file
12138                                         # back where it was originally found.
12139                                         for ext in dir_extensions:
12140                                                 try:
12141                                                         os.rename(dir_file + ext + ".old", dir_file + ext)
12142                                                 except EnvironmentError, e:
12143                                                         if e.errno != errno.ENOENT:
12144                                                                 raise
12145                                                         del e
12146
12147                                 # Clean up dir.old cruft so that stale files don't
12148                                 # prevent unmerge of otherwise empty directories.
12149                                 for ext in dir_extensions:
12150                                         try:
12151                                                 os.unlink(dir_file + ext + ".old")
12152                                         except EnvironmentError, e:
12153                                                 if e.errno != errno.ENOENT:
12154                                                         raise
12155                                                 del e
12156
12157                                 #update mtime so we can potentially avoid regenerating.
12158                                 prev_mtimes[inforoot] = long(os.stat(inforoot).st_mtime)
12159
12160                         if badcount:
12161                                 out.eerror("Processed %d info files; %d errors." % \
12162                                         (icount, badcount))
12163                                 writemsg_level(errmsg, level=logging.ERROR, noiselevel=-1)
12164                         else:
12165                                 if icount > 0:
12166                                         out.einfo("Processed %d info files." % (icount,))
12167
12168
12169 def display_news_notification(root_config, myopts):
12170         target_root = root_config.root
12171         trees = root_config.trees
12172         settings = trees["vartree"].settings
12173         portdb = trees["porttree"].dbapi
12174         vardb = trees["vartree"].dbapi
12175         NEWS_PATH = os.path.join("metadata", "news")
12176         UNREAD_PATH = os.path.join(target_root, NEWS_LIB_PATH, "news")
12177         newsReaderDisplay = False
12178         update = "--pretend" not in myopts
12179
12180         for repo in portdb.getRepositories():
12181                 unreadItems = checkUpdatedNewsItems(
12182                         portdb, vardb, NEWS_PATH, UNREAD_PATH, repo, update=update)
12183                 if unreadItems:
12184                         if not newsReaderDisplay:
12185                                 newsReaderDisplay = True
12186                                 print
12187                         print colorize("WARN", " * IMPORTANT:"),
12188                         print "%s news items need reading for repository '%s'." % (unreadItems, repo)
12189                         
12190         
12191         if newsReaderDisplay:
12192                 print colorize("WARN", " *"),
12193                 print "Use " + colorize("GOOD", "eselect news") + " to read news items."
12194                 print
12195
12196 def display_preserved_libs(vardbapi):
12197         MAX_DISPLAY = 3
12198
12199         # Ensure the registry is consistent with existing files.
12200         vardbapi.plib_registry.pruneNonExisting()
12201
12202         if vardbapi.plib_registry.hasEntries():
12203                 print
12204                 print colorize("WARN", "!!!") + " existing preserved libs:"
12205                 plibdata = vardbapi.plib_registry.getPreservedLibs()
12206                 linkmap = vardbapi.linkmap
12207                 consumer_map = {}
12208                 owners = {}
12209                 linkmap_broken = False
12210
12211                 try:
12212                         linkmap.rebuild()
12213                 except portage.exception.CommandNotFound, e:
12214                         writemsg_level("!!! Command Not Found: %s\n" % (e,),
12215                                 level=logging.ERROR, noiselevel=-1)
12216                         del e
12217                         linkmap_broken = True
12218                 else:
12219                         search_for_owners = set()
12220                         for cpv in plibdata:
12221                                 internal_plib_keys = set(linkmap._obj_key(f) \
12222                                         for f in plibdata[cpv])
12223                                 for f in plibdata[cpv]:
12224                                         if f in consumer_map:
12225                                                 continue
12226                                         consumers = []
12227                                         for c in linkmap.findConsumers(f):
12228                                                 # Filter out any consumers that are also preserved libs
12229                                                 # belonging to the same package as the provider.
12230                                                 if linkmap._obj_key(c) not in internal_plib_keys:
12231                                                         consumers.append(c)
12232                                         consumers.sort()
12233                                         consumer_map[f] = consumers
12234                                         search_for_owners.update(consumers[:MAX_DISPLAY+1])
12235
12236                         owners = vardbapi._owners.getFileOwnerMap(search_for_owners)
12237
12238                 for cpv in plibdata:
12239                         print colorize("WARN", ">>>") + " package: %s" % cpv
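                              # Group preserved paths that share the same object key (the
                              # same underlying file) so that alternate paths are listed
                              # together and consumers are only shown once per file.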
12240                         samefile_map = {}
12241                         for f in plibdata[cpv]:
12242                                 obj_key = linkmap._obj_key(f)
12243                                 alt_paths = samefile_map.get(obj_key)
12244                                 if alt_paths is None:
12245                                         alt_paths = set()
12246                                         samefile_map[obj_key] = alt_paths
12247                                 alt_paths.add(f)
12248
12249                         for alt_paths in samefile_map.itervalues():
12250                                 alt_paths = sorted(alt_paths)
12251                                 for p in alt_paths:
12252                                         print colorize("WARN", " * ") + " - %s" % (p,)
12253                                 f = alt_paths[0]
12254                                 consumers = consumer_map.get(f, [])
12255                                 for c in consumers[:MAX_DISPLAY]:
12256                                         print colorize("WARN", " * ") + "     used by %s (%s)" % \
12257                                                 (c, ", ".join(x.mycpv for x in owners.get(c, [])))
12258                                 if len(consumers) == MAX_DISPLAY + 1:
12259                                         print colorize("WARN", " * ") + "     used by %s (%s)" % \
12260                                                 (consumers[MAX_DISPLAY], ", ".join(x.mycpv \
12261                                                 for x in owners.get(consumers[MAX_DISPLAY], [])))
12262                                 elif len(consumers) > MAX_DISPLAY:
12263                                         print colorize("WARN", " * ") + "     used by %d other files" % (len(consumers) - MAX_DISPLAY)
12264                 print "Use " + colorize("GOOD", "emerge @preserved-rebuild") + " to rebuild packages using these libraries"
12265
12266
12267 def _flush_elog_mod_echo():
12268         """
12269         Dump the mod_echo output now so that our other
12270         notifications are shown last.
12271         @rtype: bool
12272         @returns: True if messages were shown, False otherwise.
12273         """
12274         messages_shown = False
12275         try:
12276                 from portage.elog import mod_echo
12277         except ImportError:
12278                 pass # happens during downgrade to a version without the module
12279         else:
12280                 messages_shown = bool(mod_echo._items)
12281                 mod_echo.finalize()
12282         return messages_shown
12283
12284 def post_emerge(root_config, myopts, mtimedb, retval):
12285         """
12286         Misc. things to run at the end of a merge session.
12287         
12288         Update Info Files
12289         Update Config Files
12290         Update News Items
12291         Commit mtimeDB
12292         Display preserved libs warnings
12293         Exit Emerge
12294
12295         @param root_config: The RootConfig for the target ROOT, providing its package databases
12296         @type root_config: RootConfig instance
12297         @param mtimedb: The mtimeDB to store data needed across merge invocations
12298         @type mtimedb: MtimeDB class instance
12299         @param retval: Emerge's return value
12300         @type retval: Int
12301         @rtype: None
12302         @returns:
12303         1.  Calls sys.exit(retval)
12304         """
12305
12306         target_root = root_config.root
12307         trees = { target_root : root_config.trees }
12308         vardbapi = trees[target_root]["vartree"].dbapi
12309         settings = vardbapi.settings
12310         info_mtimes = mtimedb["info"]
12311
12312         # Load the most current variables from ${ROOT}/etc/profile.env
12313         settings.unlock()
12314         settings.reload()
12315         settings.regenerate()
12316         settings.lock()
12317
12318         config_protect = settings.get("CONFIG_PROTECT","").split()
12319         infodirs = settings.get("INFOPATH","").split(":") + \
12320                 settings.get("INFODIR","").split(":")
12321
12322         os.chdir("/")
12323
12324         if retval == os.EX_OK:
12325                 exit_msg = " *** exiting successfully."
12326         else:
12327                 exit_msg = " *** exiting unsuccessfully with status '%s'." % retval
12328         emergelog("notitles" not in settings.features, exit_msg)
12329
12330         _flush_elog_mod_echo()
12331
12332         counter_hash = settings.get("PORTAGE_COUNTER_HASH")
12333         if "--pretend" in myopts or (counter_hash is not None and \
12334                 counter_hash == vardbapi._counter_hash()):
12335                 display_news_notification(root_config, myopts)
12336                 # If vdb state has not changed then there's nothing else to do.
12337                 sys.exit(retval)
12338
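              # Take a lock on the vdb (when it is writable and this is not a --pretend
              # run) so that info-dir regeneration and the mtimedb commit below do not
              # race with another emerge instance.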
12339         vdb_path = os.path.join(target_root, portage.VDB_PATH)
12340         portage.util.ensure_dirs(vdb_path)
12341         vdb_lock = None
12342         if os.access(vdb_path, os.W_OK) and not "--pretend" in myopts:
12343                 vdb_lock = portage.locks.lockdir(vdb_path)
12344
12345         if vdb_lock:
12346                 try:
12347                         if "noinfo" not in settings.features:
12348                                 chk_updated_info_files(target_root,
12349                                         infodirs, info_mtimes, retval)
12350                         mtimedb.commit()
12351                 finally:
12352                         if vdb_lock:
12353                                 portage.locks.unlockdir(vdb_lock)
12354
12355         chk_updated_cfg_files(target_root, config_protect)
12356         
12357         display_news_notification(root_config, myopts)
12358         if retval in (None, os.EX_OK) or (not "--pretend" in myopts):
12359                 display_preserved_libs(vardbapi)        
12360
12361         sys.exit(retval)
12362
12363
12364 def chk_updated_cfg_files(target_root, config_protect):
12365         if config_protect:
12366                 #number of directories with some protect files in them
12367                 procount=0
12368                 for x in config_protect:
12369                         x = os.path.join(target_root, x.lstrip(os.path.sep))
12370                         if not os.access(x, os.W_OK):
12371                                 # Avoid Permission denied errors generated
12372                                 # later by `find`.
12373                                 continue
12374                         try:
12375                                 mymode = os.lstat(x).st_mode
12376                         except OSError:
12377                                 continue
12378                         if stat.S_ISLNK(mymode):
12379                                 # We want to treat it like a directory if it
12380                                 # is a symlink to an existing directory.
12381                                 try:
12382                                         real_mode = os.stat(x).st_mode
12383                                         if stat.S_ISDIR(real_mode):
12384                                                 mymode = real_mode
12385                                 except OSError:
12386                                         pass
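                              # CONFIG_PROTECT updates are staged as '._cfg????_<name>' files next to
                              # the protected file; the find commands below locate any such pending
                              # updates under this path.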
12387                         if stat.S_ISDIR(mymode):
12388                                 mycommand = "find '%s' -name '.*' -type d -prune -o -name '._cfg????_*'" % x
12389                         else:
12390                                 mycommand = "find '%s' -maxdepth 1 -name '._cfg????_%s'" % \
12391                                         os.path.split(x.rstrip(os.path.sep))
12392                         mycommand += " ! -name '.*~' ! -iname '.*.bak' -print0"
12393                         a = commands.getstatusoutput(mycommand)
12394                         if a[0] != 0:
12395                                 sys.stderr.write(" %s error scanning '%s': " % (bad("*"), x))
12396                                 sys.stderr.flush()
12397                                 # Show the error message alone, sending stdout to /dev/null.
12398                                 os.system(mycommand + " 1>/dev/null")
12399                         else:
12400                                 files = a[1].split('\0')
12401                                 # split always produces an empty string as the last element
12402                                 if files and not files[-1]:
12403                                         del files[-1]
12404                                 if files:
12405                                         procount += 1
12406                                         print "\n"+colorize("WARN", " * IMPORTANT:"),
12407                                         if stat.S_ISDIR(mymode):
12408                                                  print "%d config files in '%s' need updating." % \
12409                                                         (len(files), x)
12410                                         else:
12411                                                  print "config file '%s' needs updating." % x
12412
12413                 if procount:
12414                         print " "+yellow("*")+" See the "+colorize("INFORM","CONFIGURATION FILES")+ \
12415                                 " section of the " + bold("emerge")
12416                         print " "+yellow("*")+" man page to learn how to update config files."
12417
12418 def checkUpdatedNewsItems(portdb, vardb, NEWS_PATH, UNREAD_PATH, repo_id,
12419         update=False):
12420         """
12421         Examines news items in repodir + '/' + NEWS_PATH and attempts to find unread items
12422         Returns the number of unread (yet relevant) items.
12423         
12424         @param portdb: a portage tree database
12425         @type portdb: portdbapi
12426         @param vardb: an installed package database
12427         @type vardb: vardbapi
12428         @param NEWS_PATH: path to the news items, relative to the repository root
12429         @type NEWS_PATH: String
12430         @param UNREAD_PATH: path where the list of unread news items is stored
12431         @type UNREAD_PATH: String
12432         @param repo_id: identifier of the repository whose news items are checked
12433         @type repo_id: String
12434         @rtype: Integer
12435         @returns:
12436         1.  The number of unread but relevant news items.
12437         
12438         """
12439         from portage.news import NewsManager
12440         manager = NewsManager(portdb, vardb, NEWS_PATH, UNREAD_PATH)
12441         return manager.getUnreadItems( repo_id, update=update )
12442
12443 def insert_category_into_atom(atom, category):
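              # Insert the category in front of the first alphanumeric character so
              # that any leading operator (e.g. '>=' or '~') in the atom is preserved.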
12444         alphanum = re.search(r'\w', atom)
12445         if alphanum:
12446                 ret = atom[:alphanum.start()] + "%s/" % category + \
12447                         atom[alphanum.start():]
12448         else:
12449                 ret = None
12450         return ret
12451
12452 def is_valid_package_atom(x):
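              # Atoms without a category are validated by grafting a dummy 'cat/'
              # prefix onto them, since isvalidatom() expects category/package form.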
12453         if "/" not in x:
12454                 alphanum = re.search(r'\w', x)
12455                 if alphanum:
12456                         x = x[:alphanum.start()] + "cat/" + x[alphanum.start():]
12457         return portage.isvalidatom(x)
12458
12459 def show_blocker_docs_link():
12460         print
12461         print "For more information about " + bad("Blocked Packages") + ", please refer to the following"
12462         print "section of the Gentoo Linux x86 Handbook (architecture is irrelevant):"
12463         print
12464         print "http://www.gentoo.org/doc/en/handbook/handbook-x86.xml?full=1#blocked"
12465         print
12466
12467 def show_mask_docs():
12468         print "For more information, see the MASKED PACKAGES section in the emerge"
12469         print "man page or refer to the Gentoo Handbook."
12470
12471 def action_sync(settings, trees, mtimedb, myopts, myaction):
12472         xterm_titles = "notitles" not in settings.features
12473         emergelog(xterm_titles, " === sync")
12474         myportdir = settings.get("PORTDIR", None)
12475         out = portage.output.EOutput()
12476         if not myportdir:
12477                 sys.stderr.write("!!! PORTDIR is undefined.  Is /etc/make.globals missing?\n")
12478                 sys.exit(1)
12479         if myportdir[-1]=="/":
12480                 myportdir=myportdir[:-1]
12481         try:
12482                 st = os.stat(myportdir)
12483         except OSError:
12484                 st = None
12485         if st is None:
12486                 print ">>>",myportdir,"not found, creating it."
12487                 os.makedirs(myportdir,0755)
12488                 st = os.stat(myportdir)
12489
12490         spawn_kwargs = {}
12491         spawn_kwargs["env"] = settings.environ()
12492         if 'usersync' in settings.features and \
12493                 portage.data.secpass >= 2 and \
12494                 (st.st_uid != os.getuid() and st.st_mode & 0700 or \
12495                 st.st_gid != os.getgid() and st.st_mode & 0070):
12496                 try:
12497                         homedir = pwd.getpwuid(st.st_uid).pw_dir
12498                 except KeyError:
12499                         pass
12500                 else:
12501                         # Drop privileges when syncing, in order to match
12502                         # existing uid/gid settings.
12503                         spawn_kwargs["uid"]    = st.st_uid
12504                         spawn_kwargs["gid"]    = st.st_gid
12505                         spawn_kwargs["groups"] = [st.st_gid]
12506                         spawn_kwargs["env"]["HOME"] = homedir
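                              # Start from umask 0002 and additionally mask group write unless
                              # PORTDIR itself is group writable, so newly synced files match the
                              # existing permissions.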
12507                         umask = 0002
12508                         if not st.st_mode & 0020:
12509                                 umask = umask | 0020
12510                         spawn_kwargs["umask"] = umask
12511
12512         syncuri = settings.get("SYNC", "").strip()
12513         if not syncuri:
12514                 writemsg_level("!!! SYNC is undefined. Is /etc/make.globals missing?\n",
12515                         noiselevel=-1, level=logging.ERROR)
12516                 return 1
12517
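              # Detect whether PORTDIR is already a VCS checkout: a .git checkout is
              # updated with git below, while any VCS directory causes an rsync sync
              # to abort rather than clobber the checkout.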
12518         vcs_dirs = frozenset([".git", ".svn", "CVS", ".hg"])
12519         vcs_dirs = vcs_dirs.intersection(os.listdir(myportdir))
12520
12521         os.umask(0022)
12522         dosyncuri = syncuri
12523         updatecache_flg = False
12524         if myaction == "metadata":
12525                 print "skipping sync"
12526                 updatecache_flg = True
12527         elif ".git" in vcs_dirs:
12528                 # Update existing git repository, and ignore the syncuri. We are
12529                 # going to trust the user and assume that the user is in the branch
12530                 # that he/she wants updated. We'll let the user manage branches with
12531                 # git directly.
12532                 if portage.process.find_binary("git") is None:
12533                         msg = ["Command not found: git",
12534                         "Type \"emerge dev-util/git\" to enable git support."]
12535                         for l in msg:
12536                                 writemsg_level("!!! %s\n" % l,
12537                                         level=logging.ERROR, noiselevel=-1)
12538                         return 1
12539                 msg = ">>> Starting git pull in %s..." % myportdir
12540                 emergelog(xterm_titles, msg )
12541                 writemsg_level(msg + "\n")
12542                 exitcode = portage.process.spawn_bash("cd %s ; git pull" % \
12543                         (portage._shell_quote(myportdir),), **spawn_kwargs)
12544                 if exitcode != os.EX_OK:
12545                         msg = "!!! git pull error in %s." % myportdir
12546                         emergelog(xterm_titles, msg)
12547                         writemsg_level(msg + "\n", level=logging.ERROR, noiselevel=-1)
12548                         return exitcode
12549                 msg = ">>> Git pull in %s successful" % myportdir
12550                 emergelog(xterm_titles, msg)
12551                 writemsg_level(msg + "\n")
12552                 exitcode = git_sync_timestamps(settings, myportdir)
12553                 if exitcode == os.EX_OK:
12554                         updatecache_flg = True
12555         elif syncuri[:8]=="rsync://":
12556                 for vcs_dir in vcs_dirs:
12557                         writemsg_level(("!!! %s appears to be under revision " + \
12558                                 "control (contains %s).\n!!! Aborting rsync sync.\n") % \
12559                                 (myportdir, vcs_dir), level=logging.ERROR, noiselevel=-1)
12560                         return 1
12561                 if not os.path.exists("/usr/bin/rsync"):
12562                         print "!!! /usr/bin/rsync does not exist, so rsync support is disabled."
12563                         print "!!! Type \"emerge net-misc/rsync\" to enable rsync support."
12564                         sys.exit(1)
12565                 mytimeout=180
12566
12567                 rsync_opts = []
12568                 if settings["PORTAGE_RSYNC_OPTS"] == "":
12569                         portage.writemsg("PORTAGE_RSYNC_OPTS empty or unset, using hardcoded defaults\n")
12570                         rsync_opts.extend([
12571                                 "--recursive",    # Recurse directories
12572                                 "--links",        # Consider symlinks
12573                                 "--safe-links",   # Ignore links outside of tree
12574                                 "--perms",        # Preserve permissions
12575                                 "--times",        # Preserve mod times
12576                                 "--compress",     # Compress the data transmitted
12577                                 "--force",        # Force deletion on non-empty dirs
12578                                 "--whole-file",   # Don't do block transfers, only entire files
12579                                 "--delete",       # Delete files that aren't in the master tree
12580                                 "--stats",        # Show final statistics about what was transferred
12581                                 "--timeout="+str(mytimeout), # IO timeout if not done in X seconds
12582                                 "--exclude=/distfiles",   # Exclude distfiles from consideration
12583                                 "--exclude=/local",       # Exclude local     from consideration
12584                                 "--exclude=/packages",    # Exclude packages  from consideration
12585                         ])
12586
12587                 else:
12588                         # The below validation is not needed when using the above hardcoded
12589                         # defaults.
12590
12591                         portage.writemsg("Using PORTAGE_RSYNC_OPTS instead of hardcoded defaults\n", 1)
12592                         rsync_opts.extend(
12593                                 shlex.split(settings.get("PORTAGE_RSYNC_OPTS","")))
12594                         for opt in ("--recursive", "--times"):
12595                                 if opt not in rsync_opts:
12596                                         portage.writemsg(yellow("WARNING:") + " adding required option " + \
12597                                         "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
12598                                         rsync_opts.append(opt)
12599         
12600                         for exclude in ("distfiles", "local", "packages"):
12601                                 opt = "--exclude=/%s" % exclude
12602                                 if opt not in rsync_opts:
12603                                         portage.writemsg(yellow("WARNING:") + \
12604                                         " adding required option %s not included in "  % opt + \
12605                                         "PORTAGE_RSYNC_OPTS (can be overridden with --exclude='!')\n")
12606                                         rsync_opts.append(opt)
12607         
12608                         if syncuri.rstrip("/").endswith(".gentoo.org/gentoo-portage"):
12609                                 def rsync_opt_startswith(opt_prefix):
12610                                         for x in rsync_opts:
12611                                                 if x.startswith(opt_prefix):
12612                                                         return True
12613                                         return False
12614
12615                                 if not rsync_opt_startswith("--timeout="):
12616                                         rsync_opts.append("--timeout=%d" % mytimeout)
12617
12618                                 for opt in ("--compress", "--whole-file"):
12619                                         if opt not in rsync_opts:
12620                                                 portage.writemsg(yellow("WARNING:") + " adding required option " + \
12621                                                 "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
12622                                                 rsync_opts.append(opt)
12623
12624                 if "--quiet" in myopts:
12625                         rsync_opts.append("--quiet")    # Shut up a lot
12626                 else:
12627                         rsync_opts.append("--verbose")  # Print filelist
12628
12629                 if "--verbose" in myopts:
12630                         rsync_opts.append("--progress")  # Progress meter for each file
12631
12632                 if "--debug" in myopts:
12633                         rsync_opts.append("--checksum") # Force checksum on all files
12634
12635                 # Real local timestamp file.
12636                 servertimestampfile = os.path.join(
12637                         myportdir, "metadata", "timestamp.chk")
12638
12639                 content = portage.util.grabfile(servertimestampfile)
12640                 mytimestamp = 0
12641                 if content:
12642                         try:
12643                                 mytimestamp = time.mktime(time.strptime(content[0],
12644                                         "%a, %d %b %Y %H:%M:%S +0000"))
12645                         except (OverflowError, ValueError):
12646                                 pass
12647                 del content
12648
12649                 try:
12650                         rsync_initial_timeout = \
12651                                 int(settings.get("PORTAGE_RSYNC_INITIAL_TIMEOUT", "15"))
12652                 except ValueError:
12653                         rsync_initial_timeout = 15
12654
12655                 try:
12656                         maxretries=int(settings["PORTAGE_RSYNC_RETRIES"])
12657                 except SystemExit, e:
12658                         raise # Needed else can't exit
12659                 except:
12660                         maxretries=3 #default number of retries
12661
12662                 retries=0
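                      # Split the SYNC URI into optional user@, hostname and optional :port
                      # components so that a resolved IP address can be substituted for the
                      # hostname on each attempt.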
12663                 user_name, hostname, port = re.split(
12664                         "rsync://([^:/]+@)?([^:/]*)(:[0-9]+)?", syncuri, maxsplit=3)[1:4]
12665                 if port is None:
12666                         port=""
12667                 if user_name is None:
12668                         user_name=""
12669                 updatecache_flg=True
12670                 all_rsync_opts = set(rsync_opts)
12671                 extra_rsync_opts = shlex.split(
12672                         settings.get("PORTAGE_RSYNC_EXTRA_OPTS",""))
12673                 all_rsync_opts.update(extra_rsync_opts)
12674                 family = socket.AF_INET
12675                 if "-4" in all_rsync_opts or "--ipv4" in all_rsync_opts:
12676                         family = socket.AF_INET
12677                 elif socket.has_ipv6 and \
12678                         ("-6" in all_rsync_opts or "--ipv6" in all_rsync_opts):
12679                         family = socket.AF_INET6
12680                 ips=[]
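                      # Sentinel pseudo exit codes; negative values cannot collide with real
                      # rsync exit codes.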
12681                 SERVER_OUT_OF_DATE = -1
12682                 EXCEEDED_MAX_RETRIES = -2
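                      # Retry loop: each pass drops the previously tried address, re-resolves
                      # and shuffles the mirror's IPs when the list runs out, and substitutes
                      # the next IP into the sync URI before attempting the transfer.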
12683                 while (1):
12684                         if ips:
12685                                 del ips[0]
12686                         if ips==[]:
12687                                 try:
12688                                         for addrinfo in socket.getaddrinfo(
12689                                                 hostname, None, family, socket.SOCK_STREAM):
12690                                                 if socket.has_ipv6 and addrinfo[0] == socket.AF_INET6:
12691                                                         # IPv6 addresses need to be enclosed in square brackets
12692                                                         ips.append("[%s]" % addrinfo[4][0])
12693                                                 else:
12694                                                         ips.append(addrinfo[4][0])
12695                                         from random import shuffle
12696                                         shuffle(ips)
12697                                 except SystemExit, e:
12698                                         raise # Needed else can't exit
12699                                 except Exception, e:
12700                                         print "Notice:",str(e)
12701                                         dosyncuri=syncuri
12702
12703                         if ips:
12704                                 try:
12705                                         dosyncuri = syncuri.replace(
12706                                                 "//" + user_name + hostname + port + "/",
12707                                                 "//" + user_name + ips[0] + port + "/", 1)
12708                                 except SystemExit, e:
12709                                         raise # Needed else can't exit
12710                                 except Exception, e:
12711                                         print "Notice:",str(e)
12712                                         dosyncuri=syncuri
12713
12714                         if (retries==0):
12715                                 if "--ask" in myopts:
12716                                         if userquery("Do you want to sync your Portage tree with the mirror at\n" + blue(dosyncuri) + bold("?"))=="No":
12717                                                 print
12718                                                 print "Quitting."
12719                                                 print
12720                                                 sys.exit(0)
12721                                 emergelog(xterm_titles, ">>> Starting rsync with " + dosyncuri)
12722                                 if "--quiet" not in myopts:
12723                                         print ">>> Starting rsync with "+dosyncuri+"..."
12724                         else:
12725                                 emergelog(xterm_titles,
12726                                         ">>> Starting retry %d of %d with %s" % \
12727                                                 (retries,maxretries,dosyncuri))
12728                                 print "\n\n>>> Starting retry %d of %d with %s" % (retries,maxretries,dosyncuri)
12729
12730                         if mytimestamp != 0 and "--quiet" not in myopts:
12731                                 print ">>> Checking server timestamp ..."
12732
12733                         rsynccommand = ["/usr/bin/rsync"] + rsync_opts + extra_rsync_opts
12734
12735                         if "--debug" in myopts:
12736                                 print rsynccommand
12737
12738                         exitcode = os.EX_OK
12739                         servertimestamp = 0
12740                         # Even if there's no timestamp available locally, fetch the
12741                         # timestamp anyway as an initial probe to verify that the server is
12742                         # responsive.  This protects us from hanging indefinitely on a
12743                         # connection attempt to an unresponsive server which rsync's
12744                         # --timeout option does not prevent.
12745                         if True:
12746                                 # Temporary file for remote server timestamp comparison.
12747                                 from tempfile import mkstemp
12748                                 fd, tmpservertimestampfile = mkstemp()
12749                                 os.close(fd)
12750                                 mycommand = rsynccommand[:]
12751                                 mycommand.append(dosyncuri.rstrip("/") + \
12752                                         "/metadata/timestamp.chk")
12753                                 mycommand.append(tmpservertimestampfile)
12754                                 content = None
12755                                 mypids = []
12756                                 try:
12757                                         def timeout_handler(signum, frame):
12758                                                 raise portage.exception.PortageException("timed out")
12759                                         signal.signal(signal.SIGALRM, timeout_handler)
12760                                         # Timeout here in case the server is unresponsive.  The
12761                                         # --timeout rsync option doesn't apply to the initial
12762                                         # connection attempt.
12763                                         if rsync_initial_timeout:
12764                                                 signal.alarm(rsync_initial_timeout)
12765                                         try:
12766                                                 mypids.extend(portage.process.spawn(
12767                                                         mycommand, env=settings.environ(), returnpid=True))
12768                                                 exitcode = os.waitpid(mypids[0], 0)[1]
12769                                                 content = portage.grabfile(tmpservertimestampfile)
12770                                         finally:
12771                                                 if rsync_initial_timeout:
12772                                                         signal.alarm(0)
12773                                                 try:
12774                                                         os.unlink(tmpservertimestampfile)
12775                                                 except OSError:
12776                                                         pass
12777                                 except portage.exception.PortageException, e:
12778                                         # timed out
12779                                         print e
12780                                         del e
12781                                         if mypids and os.waitpid(mypids[0], os.WNOHANG) == (0,0):
12782                                                 os.kill(mypids[0], signal.SIGTERM)
12783                                                 os.waitpid(mypids[0], 0)
12784                                         # This is the same code rsync uses for timeout.
12785                                         exitcode = 30
12786                                 else:
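                                              # os.waitpid() returns an encoded status: the exit code lives in the
                                              # high byte and a terminating signal in the low byte.  Reduce it to a
                                              # single nonzero value on failure so the timestamp checks below do not
                                              # mistake a failed probe for success.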
12787                                         if exitcode != os.EX_OK:
12788                                                 if exitcode & 0xff:
12789                                                         exitcode = (exitcode & 0xff) << 8
12790                                                 else:
12791                                                         exitcode = exitcode >> 8
12792                                 if mypids:
12793                                         portage.process.spawned_pids.remove(mypids[0])
12794                                 if content:
12795                                         try:
12796                                                 servertimestamp = time.mktime(time.strptime(
12797                                                         content[0], "%a, %d %b %Y %H:%M:%S +0000"))
12798                                         except (OverflowError, ValueError):
12799                                                 pass
12800                                 del mycommand, mypids, content
12801                         if exitcode == os.EX_OK:
12802                                 if (servertimestamp != 0) and (servertimestamp == mytimestamp):
12803                                         emergelog(xterm_titles,
12804                                                 ">>> Cancelling sync -- Already current.")
12805                                         print
12806                                         print ">>>"
12807                                         print ">>> Timestamps on the server and in the local repository are the same."
12808                                         print ">>> Cancelling all further sync action. You are already up to date."
12809                                         print ">>>"
12810                                         print ">>> In order to force sync, remove '%s'." % servertimestampfile
12811                                         print ">>>"
12812                                         print
12813                                         sys.exit(0)
12814                                 elif (servertimestamp != 0) and (servertimestamp < mytimestamp):
12815                                         emergelog(xterm_titles,
12816                                                 ">>> Server out of date: %s" % dosyncuri)
12817                                         print
12818                                         print ">>>"
12819                                         print ">>> SERVER OUT OF DATE: %s" % dosyncuri
12820                                         print ">>>"
12821                                         print ">>> In order to force sync, remove '%s'." % servertimestampfile
12822                                         print ">>>"
12823                                         print
12824                                         exitcode = SERVER_OUT_OF_DATE
12825                                 elif (servertimestamp == 0) or (servertimestamp > mytimestamp):
12826                                         # actual sync
12827                                         mycommand = rsynccommand + [dosyncuri+"/", myportdir]
12828                                         exitcode = portage.process.spawn(mycommand, **spawn_kwargs)
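                                              # These rsync exit codes are treated as final: 0 is success, and the
                                              # rest (e.g. 1 syntax, 11 file I/O, 20 killed) are reported via the
                                              # error messages below instead of being retried.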
12829                                         if exitcode in [0,1,3,4,11,14,20,21]:
12830                                                 break
12831                         elif exitcode in [1,3,4,11,14,20,21]:
12832                                 break
12833                         else:
12834                                 # Code 2 indicates protocol incompatibility, which is expected
12835                                 # for servers with protocol < 29 that don't support
12836                                 # --prune-empty-directories.  Retry for a server that supports
12837                                 # at least rsync protocol version 29 (>=rsync-2.6.4).
12838                                 pass
12839
12840                         retries=retries+1
12841
12842                         if retries<=maxretries:
12843                                 print ">>> Retrying..."
12844                                 time.sleep(11)
12845                         else:
12846                                 # over retries
12847                                 # exit loop
12848                                 updatecache_flg=False
12849                                 exitcode = EXCEEDED_MAX_RETRIES
12850                                 break
12851
12852                 if (exitcode==0):
12853                         emergelog(xterm_titles, "=== Sync completed with %s" % dosyncuri)
12854                 elif exitcode == SERVER_OUT_OF_DATE:
12855                         sys.exit(1)
12856                 elif exitcode == EXCEEDED_MAX_RETRIES:
12857                         sys.stderr.write(
12858                                 ">>> Exceeded PORTAGE_RSYNC_RETRIES: %s\n" % maxretries)
12859                         sys.exit(1)
12860                 elif (exitcode>0):
12861                         msg = []
12862                         if exitcode==1:
12863                                 msg.append("Rsync has reported that there is a syntax error. Please ensure")
12864                                 msg.append("that your SYNC statement is proper.")
12865                                 msg.append("SYNC=" + settings["SYNC"])
12866                         elif exitcode==11:
12867                                 msg.append("Rsync has reported that there is a File IO error. Normally")
12868                                 msg.append("this means your disk is full, but can be caused by corruption")
12869                                 msg.append("on the filesystem that contains PORTDIR. Please investigate")
12870                                 msg.append("and try again after the problem has been fixed.")
12871                                 msg.append("PORTDIR=" + settings["PORTDIR"])
12872                         elif exitcode==20:
12873                                 msg.append("Rsync was killed before it finished.")
12874                         else:
12875                                 msg.append("Rsync has not successfully finished. It is recommended that you keep")
12876                                 msg.append("trying or that you use the 'emerge-webrsync' option if you are unable")
12877                                 msg.append("to use rsync due to firewall or other restrictions. This should be a")
12878                                 msg.append("temporary problem unless complications exist with your network")
12879                                 msg.append("(and possibly your system's filesystem) configuration.")
12880                         for line in msg:
12881                                 out.eerror(line)
12882                         sys.exit(exitcode)
12883         elif syncuri[:6]=="cvs://":
12884                 if not os.path.exists("/usr/bin/cvs"):
12885                         print "!!! /usr/bin/cvs does not exist, so CVS support is disabled."
12886                         print "!!! Type \"emerge dev-util/cvs\" to enable CVS support."
12887                         sys.exit(1)
12888                 cvsroot=syncuri[6:]
12889                 cvsdir=os.path.dirname(myportdir)
12890                 if not os.path.exists(myportdir+"/CVS"):
12891                         #initial checkout
12892                         print ">>> Starting initial cvs checkout with "+syncuri+"..."
12893                         if os.path.exists(cvsdir+"/gentoo-x86"):
12894                                 print "!!! existing",cvsdir+"/gentoo-x86 directory; exiting."
12895                                 sys.exit(1)
12896                         try:
12897                                 os.rmdir(myportdir)
12898                         except OSError, e:
12899                                 if e.errno != errno.ENOENT:
12900                                         sys.stderr.write(
12901                                                 "!!! existing '%s' directory; exiting.\n" % myportdir)
12902                                         sys.exit(1)
12903                                 del e
12904                         if portage.spawn("cd "+cvsdir+"; cvs -z0 -d "+cvsroot+" co -P gentoo-x86",settings,free=1):
12905                                 print "!!! cvs checkout error; exiting."
12906                                 sys.exit(1)
12907                         os.rename(os.path.join(cvsdir, "gentoo-x86"), myportdir)
12908                 else:
12909                         #cvs update
12910                         print ">>> Starting cvs update with "+syncuri+"..."
12911                         retval = portage.process.spawn_bash(
12912                                 "cd %s; cvs -z0 -q update -dP" % \
12913                                 (portage._shell_quote(myportdir),), **spawn_kwargs)
12914                         if retval != os.EX_OK:
12915                                 sys.exit(retval)
12916                 dosyncuri = syncuri
12917         else:
12918                 writemsg_level("!!! Unrecognized protocol: SYNC='%s'\n" % (syncuri,),
12919                         noiselevel=-1, level=logging.ERROR)
12920                 return 1
12921
12922         if updatecache_flg and  \
12923                 myaction != "metadata" and \
12924                 "metadata-transfer" not in settings.features:
12925                 updatecache_flg = False
12926
12927         # Reload the whole config from scratch.
12928         settings, trees, mtimedb = load_emerge_config(trees=trees)
12929         root_config = trees[settings["ROOT"]]["root_config"]
12930         portdb = trees[settings["ROOT"]]["porttree"].dbapi
12931
12932         if os.path.exists(myportdir+"/metadata/cache") and updatecache_flg:
12933                 action_metadata(settings, portdb, myopts)
12934
12935         if portage._global_updates(trees, mtimedb["updates"]):
12936                 mtimedb.commit()
12937                 # Reload the whole config from scratch.
12938                 settings, trees, mtimedb = load_emerge_config(trees=trees)
12939                 portdb = trees[settings["ROOT"]]["porttree"].dbapi
12940                 root_config = trees[settings["ROOT"]]["root_config"]
12941
12942         mybestpv = portdb.xmatch("bestmatch-visible",
12943                 portage.const.PORTAGE_PACKAGE_ATOM)
12944         mypvs = portage.best(
12945                 trees[settings["ROOT"]]["vartree"].dbapi.match(
12946                 portage.const.PORTAGE_PACKAGE_ATOM))
12947
12948         chk_updated_cfg_files("/", settings.get("CONFIG_PROTECT","").split())
12949
12950         if myaction != "metadata":
12951                 if os.access(portage.USER_CONFIG_PATH + "/bin/post_sync", os.X_OK):
12952                         retval = portage.process.spawn(
12953                                 [os.path.join(portage.USER_CONFIG_PATH, "bin", "post_sync"),
12954                                 dosyncuri], env=settings.environ())
12955                         if retval != os.EX_OK:
12956                                 print red(" * ")+bold("spawn failed for " + portage.USER_CONFIG_PATH + "/bin/post_sync")
12957
12958         if (mybestpv != mypvs) and "--quiet" not in myopts:
12959                 print
12960                 print red(" * ")+bold("An update to portage is available.")+" It is _highly_ recommended"
12961                 print red(" * ")+"that you update portage now, before any other packages are updated."
12962                 print
12963                 print red(" * ")+"To update portage, run 'emerge portage' now."
12964                 print
12965         
12966         display_news_notification(root_config, myopts)
12967         return os.EX_OK
12968
12969 def git_sync_timestamps(settings, portdir):
12970         """
12971         Since git doesn't preserve timestamps, synchronize timestamps between
12972         entries and ebuilds/eclasses. Assume the cache has the correct timestamp
12973         for a given file as long as the file in the working tree is not modified
12974         (relative to HEAD).
12975         """
12976         cache_dir = os.path.join(portdir, "metadata", "cache")
12977         if not os.path.isdir(cache_dir):
12978                 return os.EX_OK
12979         writemsg_level(">>> Synchronizing timestamps...\n")
12980
12981         from portage.cache.cache_errors import CacheError
12982         try:
12983                 cache_db = settings.load_best_module("portdbapi.metadbmodule")(
12984                         portdir, "metadata/cache", portage.auxdbkeys[:], readonly=True)
12985         except CacheError, e:
12986                 writemsg_level("!!! Unable to instantiate cache: %s\n" % (e,),
12987                         level=logging.ERROR, noiselevel=-1)
12988                 return 1
12989
12990         ec_dir = os.path.join(portdir, "eclass")
12991         try:
12992                 ec_names = set(f[:-7] for f in os.listdir(ec_dir) \
12993                         if f.endswith(".eclass"))
12994         except OSError, e:
12995                 writemsg_level("!!! Unable to list eclasses: %s\n" % (e,),
12996                         level=logging.ERROR, noiselevel=-1)
12997                 return 1
12998
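              # Ask git which files in the working tree differ from HEAD; per the
              # docstring above, cache timestamps are only trusted for ebuilds and
              # eclasses that are unmodified.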
12999         args = [portage.const.BASH_BINARY, "-c",
13000                 "cd %s && git diff-index --name-only --diff-filter=M HEAD" % \
13001                 portage._shell_quote(portdir)]
13002         import subprocess
13003         proc = subprocess.Popen(args, stdout=subprocess.PIPE)
13004         modified_files = set(l.rstrip("\n") for l in proc.stdout)
13005         rval = proc.wait()
13006         if rval != os.EX_OK:
13007                 return rval
13008
13009         modified_eclasses = set(ec for ec in ec_names \
13010                 if os.path.join("eclass", ec + ".eclass") in modified_files)
13011
13012         updated_ec_mtimes = {}
13013
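              # For every cache entry whose ebuild and eclasses are unmodified, copy
              # the cached mtimes back onto the ebuild and eclass files so that
              # metadata cache validation does not see spurious mtime mismatches
              # after a git pull.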
13014         for cpv in cache_db:
13015                 cpv_split = portage.catpkgsplit(cpv)
13016                 if cpv_split is None:
13017                         writemsg_level("!!! Invalid cache entry: %s\n" % (cpv,),
13018                                 level=logging.ERROR, noiselevel=-1)
13019                         continue
13020
13021                 cat, pn, ver, rev = cpv_split
13022                 cat, pf = portage.catsplit(cpv)
13023                 relative_eb_path = os.path.join(cat, pn, pf + ".ebuild")
13024                 if relative_eb_path in modified_files:
13025                         continue
13026
13027                 try:
13028                         cache_entry = cache_db[cpv]
13029                         eb_mtime = cache_entry.get("_mtime_")
13030                         ec_mtimes = cache_entry.get("_eclasses_")
13031                 except KeyError:
13032                         writemsg_level("!!! Missing cache entry: %s\n" % (cpv,),
13033                                 level=logging.ERROR, noiselevel=-1)
13034                         continue
13035                 except CacheError, e:
13036                         writemsg_level("!!! Unable to access cache entry: %s %s\n" % \
13037                                 (cpv, e), level=logging.ERROR, noiselevel=-1)
13038                         continue
13039
13040                 if eb_mtime is None:
13041                         writemsg_level("!!! Missing ebuild mtime: %s\n" % (cpv,),
13042                                 level=logging.ERROR, noiselevel=-1)
13043                         continue
13044
13045                 try:
13046                         eb_mtime = long(eb_mtime)
13047                 except ValueError:
13048                         writemsg_level("!!! Invalid ebuild mtime: %s %s\n" % \
13049                                 (cpv, eb_mtime), level=logging.ERROR, noiselevel=-1)
13050                         continue
13051
13052                 if ec_mtimes is None:
13053                         writemsg_level("!!! Missing eclass mtimes: %s\n" % (cpv,),
13054                                 level=logging.ERROR, noiselevel=-1)
13055                         continue
13056
13057                 if modified_eclasses.intersection(ec_mtimes):
13058                         continue
13059
13060                 missing_eclasses = set(ec_mtimes).difference(ec_names)
13061                 if missing_eclasses:
13062                         writemsg_level("!!! Non-existent eclass(es): %s %s\n" % \
13063                                 (cpv, sorted(missing_eclasses)), level=logging.ERROR,
13064                                 noiselevel=-1)
13065                         continue
13066
13067                 eb_path = os.path.join(portdir, relative_eb_path)
13068                 try:
13069                         current_eb_mtime = long(os.stat(eb_path).st_mtime)
13070                 except OSError:
13071                         writemsg_level("!!! Missing ebuild: %s\n" % \
13072                                 (cpv,), level=logging.ERROR, noiselevel=-1)
13073                         continue
13074
13075                 inconsistent = False
13076                 for ec, (ec_path, ec_mtime) in ec_mtimes.iteritems():
13077                         updated_mtime = updated_ec_mtimes.get(ec)
13078                         if updated_mtime is not None and updated_mtime != ec_mtime:
13079                                 writemsg_level("!!! Inconsistent eclass mtime: %s %s\n" % \
13080                                         (cpv, ec), level=logging.ERROR, noiselevel=-1)
13081                                 inconsistent = True
13082                                 break
13083
13084                 if inconsistent:
13085                         continue
13086
13087                 if current_eb_mtime != eb_mtime:
13088                         os.utime(eb_path, (eb_mtime, eb_mtime))
13089
13090                 for ec, (ec_path, ec_mtime) in ec_mtimes.iteritems():
13091                         if ec in updated_ec_mtimes:
13092                                 continue
13093                         ec_path = os.path.join(ec_dir, ec + ".eclass")
13094                         current_mtime = long(os.stat(ec_path).st_mtime)
13095                         if current_mtime != ec_mtime:
13096                                 os.utime(ec_path, (ec_mtime, ec_mtime))
13097                         updated_ec_mtimes[ec] = ec_mtime
13098
13099         return os.EX_OK
13100
13101 def action_metadata(settings, portdb, myopts):
13102         portage.writemsg_stdout("\n>>> Updating Portage cache:      ")
13103         old_umask = os.umask(0002)
13104         cachedir = os.path.normpath(settings.depcachedir)
13105         if cachedir in ["/",    "/bin", "/dev",  "/etc",  "/home",
13106                                         "/lib", "/opt", "/proc", "/root", "/sbin",
13107                                         "/sys", "/tmp", "/usr",  "/var"]:
13108                 print >> sys.stderr, "!!! PORTAGE_DEPCACHEDIR IS SET TO A PRIMARY " + \
13109                         "ROOT DIRECTORY ON YOUR SYSTEM."
13110                 print >> sys.stderr, \
13111                         "!!! This is ALMOST CERTAINLY NOT what you want: '%s'" % cachedir
13112                 sys.exit(73)
13113         if not os.path.exists(cachedir):
13114                 os.mkdir(cachedir)
13115
13116         ec = portage.eclass_cache.cache(portdb.porttree_root)
13117         myportdir = os.path.realpath(settings["PORTDIR"])
13118         cm = settings.load_best_module("portdbapi.metadbmodule")(
13119                 myportdir, "metadata/cache", portage.auxdbkeys[:], readonly=True)
13120
13121         from portage.cache import util
13122
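              # This class doubles as the cpv source and the verbose_instance passed to
              # mirror_cache() below: __iter__() yields every cpv and periodically
              # resets call_update_min (presumably the threshold mirror_cache uses
              # before calling update()), and update() prints a rough percent complete.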
13123         class percentage_noise_maker(util.quiet_mirroring):
13124                 def __init__(self, dbapi):
13125                         self.dbapi = dbapi
13126                         self.cp_all = dbapi.cp_all()
13127                         l = len(self.cp_all)
13128                         self.call_update_min = 100000000
13129                         self.min_cp_all = l/100.0
13130                         self.count = 1
13131                         self.pstr = ''
13132
13133                 def __iter__(self):
13134                         for x in self.cp_all:
13135                                 self.count += 1
13136                                 if self.count > self.min_cp_all:
13137                                         self.call_update_min = 0
13138                                         self.count = 0
13139                                 for y in self.dbapi.cp_list(x):
13140                                         yield y
13141                         self.call_update_min = 0
13142
13143                 def update(self, *arg):
13144                         try: self.pstr = int(self.pstr) + 1
13145                         except ValueError: self.pstr = 1
13146                         sys.stdout.write("%s%i%%" % \
13147                                 ("\b" * (len(str(self.pstr))+1), self.pstr))
13148                         sys.stdout.flush()
13149                         self.call_update_min = 10000000
13150
13151                 def finish(self, *arg):
13152                         sys.stdout.write("\b\b\b\b100%\n")
13153                         sys.stdout.flush()
13154
13155         if "--quiet" in myopts:
13156                 def quicky_cpv_generator(cp_all_list):
13157                         for x in cp_all_list:
13158                                 for y in portdb.cp_list(x):
13159                                         yield y
13160                 source = quicky_cpv_generator(portdb.cp_all())
13161                 noise_maker = portage.cache.util.quiet_mirroring()
13162         else:
13163                 noise_maker = source = percentage_noise_maker(portdb)
13164         portage.cache.util.mirror_cache(source, cm, portdb.auxdb[myportdir],
13165                 eclass_cache=ec, verbose_instance=noise_maker)
13166
13167         sys.stdout.flush()
13168         os.umask(old_umask)
13169
13170 def action_regen(settings, portdb, max_jobs, max_load):
13171         xterm_titles = "notitles" not in settings.features
13172         emergelog(xterm_titles, " === regen")
13173         #regenerate cache entries
13174         portage.writemsg_stdout("Regenerating cache entries...\n")
13175         try:
13176                 os.close(sys.stdin.fileno())
13177         except SystemExit, e:
13178                 raise # Needed else can't exit
13179         except:
13180                 pass
13181         sys.stdout.flush()
13182
13183         regen = MetadataRegen(portdb, max_jobs=max_jobs, max_load=max_load)
13184         regen.run()
13185
13186         portage.writemsg_stdout("done!\n")
13187         return regen.returncode
13188
13189 def action_config(settings, trees, myopts, myfiles):
13190         if len(myfiles) != 1:
13191                 print red("!!! config can only take a single package atom at this time\n")
13192                 sys.exit(1)
13193         if not is_valid_package_atom(myfiles[0]):
13194                 portage.writemsg("!!! '%s' is not a valid package atom.\n" % myfiles[0],
13195                         noiselevel=-1)
13196                 portage.writemsg("!!! Please check ebuild(5) for full details.\n")
13197                 portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
13198                 sys.exit(1)
13199         print
13200         try:
13201                 pkgs = trees[settings["ROOT"]]["vartree"].dbapi.match(myfiles[0])
13202         except portage.exception.AmbiguousPackageName, e:
13203                 # Multiple matches thrown from cpv_expand
13204                 pkgs = e.args[0]
13205         if len(pkgs) == 0:
13206                 print "No packages found.\n"
13207                 sys.exit(0)
13208         elif len(pkgs) > 1:
13209                 if "--ask" in myopts:
13210                         options = []
13211                         print "Please select a package to configure:"
13212                         idx = 0
13213                         for pkg in pkgs:
13214                                 idx += 1
13215                                 options.append(str(idx))
13216                                 print options[-1]+") "+pkg
13217                         print "X) Cancel"
13218                         options.append("X")
13219                         idx = userquery("Selection?", options)
13220                         if idx == "X":
13221                                 sys.exit(0)
13222                         pkg = pkgs[int(idx)-1]
13223                 else:
13224                         print "The following packages are available:"
13225                         for pkg in pkgs:
13226                                 print "* "+pkg
13227                         print "\nPlease use a specific atom or the --ask option."
13228                         sys.exit(1)
13229         else:
13230                 pkg = pkgs[0]
13231
13232         print
13233         if "--ask" in myopts:
13234                 if userquery("Ready to configure "+pkg+"?") == "No":
13235                         sys.exit(0)
13236         else:
13237                 print "Configuring %s..." % pkg
13238         print
13239         ebuildpath = trees[settings["ROOT"]]["vartree"].dbapi.findname(pkg)
13240         mysettings = portage.config(clone=settings)
13241         vardb = trees[mysettings["ROOT"]]["vartree"].dbapi
13242         debug = mysettings.get("PORTAGE_DEBUG") == "1"
13243         retval = portage.doebuild(ebuildpath, "config", mysettings["ROOT"],
13244                 mysettings,
13245                 debug=debug, cleanup=True,
13246                 mydbapi=trees[settings["ROOT"]]["vartree"].dbapi, tree="vartree")
13247         if retval == os.EX_OK:
13248                 portage.doebuild(ebuildpath, "clean", mysettings["ROOT"],
13249                         mysettings, debug=debug, mydbapi=vardb, tree="vartree")
13250         print
13251
13252 def action_info(settings, trees, myopts, myfiles):
13253         print getportageversion(settings["PORTDIR"], settings["ROOT"],
13254                 settings.profile_path, settings["CHOST"],
13255                 trees[settings["ROOT"]]["vartree"].dbapi)
13256         header_width = 65
13257         header_title = "System Settings"
13258         if myfiles:
13259                 print header_width * "="
13260                 print header_title.rjust(int(header_width/2 + len(header_title)/2))
13261         print header_width * "="
13262         print "System uname: "+platform.platform(aliased=1)
13263
13264         lastSync = portage.grabfile(os.path.join(
13265                 settings["PORTDIR"], "metadata", "timestamp.chk"))
13266         print "Timestamp of tree:",
13267         if lastSync:
13268                 print lastSync[0]
13269         else:
13270                 print "Unknown"
13271
13272         output=commands.getstatusoutput("distcc --version")
13273         if not output[0]:
13274                 print str(output[1].split("\n",1)[0]),
13275                 if "distcc" in settings.features:
13276                         print "[enabled]"
13277                 else:
13278                         print "[disabled]"
13279
13280         output=commands.getstatusoutput("ccache -V")
13281         if not output[0]:
13282                 print str(output[1].split("\n",1)[0]),
13283                 if "ccache" in settings.features:
13284                         print "[enabled]"
13285                 else:
13286                         print "[disabled]"
13287
13288         myvars  = ["sys-devel/autoconf", "sys-devel/automake", "virtual/os-headers",
13289                    "sys-devel/binutils", "sys-devel/libtool",  "dev-lang/python"]
13290         myvars += portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_pkgs")
13291         myvars  = portage.util.unique_array(myvars)
13292         myvars.sort()
13293
13294         for x in myvars:
13295                 if portage.isvalidatom(x):
13296                         pkg_matches = trees["/"]["vartree"].dbapi.match(x)
13297                         pkg_matches = [portage.catpkgsplit(cpv)[1:] for cpv in pkg_matches]
13298                         pkg_matches.sort(key=cmp_sort_key(portage.pkgcmp))
13299                         pkgs = []
13300                         for pn, ver, rev in pkg_matches:
13301                                 if rev != "r0":
13302                                         pkgs.append(ver + "-" + rev)
13303                                 else:
13304                                         pkgs.append(ver)
13305                         if pkgs:
13306                                 pkgs = ", ".join(pkgs)
13307                                 print "%-20s %s" % (x+":", pkgs)
13308                 else:
13309                         print "%-20s %s" % (x+":", "[NOT VALID]")
13310
13311         libtool_vers = ",".join(trees["/"]["vartree"].dbapi.match("sys-devel/libtool"))
13312
13313         if "--verbose" in myopts:
13314                 myvars=settings.keys()
13315         else:
13316                 myvars = ['GENTOO_MIRRORS', 'CONFIG_PROTECT', 'CONFIG_PROTECT_MASK',
13317                           'PORTDIR', 'DISTDIR', 'PKGDIR', 'PORTAGE_TMPDIR',
13318                           'PORTDIR_OVERLAY', 'USE', 'CHOST', 'CFLAGS', 'CXXFLAGS',
13319                           'ACCEPT_KEYWORDS', 'SYNC', 'FEATURES', 'EMERGE_DEFAULT_OPTS']
13320
13321                 myvars.extend(portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_vars"))
13322
13323         myvars = portage.util.unique_array(myvars)
13324         unset_vars = []
13325         myvars.sort()
13326         for x in myvars:
13327                 if x in settings:
13328                         if x != "USE":
13329                                 print '%s="%s"' % (x, settings[x])
13330                         else:
13331                                 use = set(settings["USE"].split())
13332                                 use_expand = settings["USE_EXPAND"].split()
13333                                 use_expand.sort()
13334                                 for varname in use_expand:
13335                                         flag_prefix = varname.lower() + "_"
13336                                         for f in list(use):
13337                                                 if f.startswith(flag_prefix):
13338                                                         use.remove(f)
13339                                 use = list(use)
13340                                 use.sort()
13341                                 print 'USE="%s"' % " ".join(use),
13342                                 for varname in use_expand:
13343                                         myval = settings.get(varname)
13344                                         if myval:
13345                                                 print '%s="%s"' % (varname, myval),
13346                                 print
13347                 else:
13348                         unset_vars.append(x)
13349         if unset_vars:
13350                 print "Unset:  "+", ".join(unset_vars)
13351         print
13352
13353         if "--debug" in myopts:
13354                 for x in dir(portage):
13355                         module = getattr(portage, x)
13356                         if "cvs_id_string" in dir(module):
13357                                 print "%s: %s" % (str(x), str(module.cvs_id_string))
13358
13359         # See if we can find any packages installed matching the strings
13360         # passed on the command line
13361         mypkgs = []
13362         vardb = trees[settings["ROOT"]]["vartree"].dbapi
13363         portdb = trees[settings["ROOT"]]["porttree"].dbapi
13364         for x in myfiles:
13365                 mypkgs.extend(vardb.match(x))
13366
13367         # If some packages were found...
13368         if mypkgs:
13369                 # Get our global settings (we only print stuff if it varies from
13370                 # the current config)
13371                 mydesiredvars = [ 'CHOST', 'CFLAGS', 'CXXFLAGS', 'LDFLAGS' ]
13372                 auxkeys = mydesiredvars + [ "USE", "IUSE"]
13373                 global_vals = {}
13374                 pkgsettings = portage.config(clone=settings)
13375
13376                 for myvar in mydesiredvars:
13377                         global_vals[myvar] = set(settings.get(myvar, "").split())
13378
13379                 # Loop through each package
13380                 # Only print settings if they differ from global settings
13381                 header_title = "Package Settings"
13382                 print header_width * "="
13383                 print header_title.rjust(int(header_width/2 + len(header_title)/2))
13384                 print header_width * "="
13385                 from portage.output import EOutput
13386                 out = EOutput()
13387                 for pkg in mypkgs:
13388                         # Get all package specific variables
13389                         auxvalues = vardb.aux_get(pkg, auxkeys)
13390                         valuesmap = {}
13391                         for i in xrange(len(auxkeys)):
13392                                 valuesmap[auxkeys[i]] = set(auxvalues[i].split())
13393                         diff_values = {}
13394                         for myvar in mydesiredvars:
13395                                 # If the package variable doesn't match the
13396                                 # current global variable, something has changed
13397                                 # so set diff_found so we know to print
13398                                 if valuesmap[myvar] != global_vals[myvar]:
13399                                         diff_values[myvar] = valuesmap[myvar]
13400                         valuesmap["IUSE"] = set(filter_iuse_defaults(valuesmap["IUSE"]))
13401                         valuesmap["USE"] = valuesmap["USE"].intersection(valuesmap["IUSE"])
13402                         pkgsettings.reset()
13403                         # If a matching ebuild is no longer available in the tree, maybe it
13404                         # would make sense to compare against the flags for the best
13405                         # available version with the same slot?
13406                         mydb = None
13407                         if portdb.cpv_exists(pkg):
13408                                 mydb = portdb
13409                         pkgsettings.setcpv(pkg, mydb=mydb)
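                              # Compare the flags the package was built with (limited to its
                              # IUSE) against the flags it would be built with now; any
                              # difference means USE has changed since this version was installed.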
13410                         if valuesmap["IUSE"].intersection(
13411                                 pkgsettings["PORTAGE_USE"].split()) != valuesmap["USE"]:
13412                                 diff_values["USE"] = valuesmap["USE"]
13413                         # If a difference was found, print the info for
13414                         # this package.
13415                         if diff_values:
13416                                 # Print package info
13417                                 print "%s was built with the following:" % pkg
13418                                 for myvar in mydesiredvars + ["USE"]:
13419                                         if myvar in diff_values:
13420                                                 mylist = list(diff_values[myvar])
13421                                                 mylist.sort()
13422                                                 print "%s=\"%s\"" % (myvar, " ".join(mylist))
13423                                 print
13424                         print ">>> Attempting to run pkg_info() for '%s'" % pkg
13425                         ebuildpath = vardb.findname(pkg)
13426                         if not ebuildpath or not os.path.exists(ebuildpath):
13427                                 out.ewarn("No ebuild found for '%s'" % pkg)
13428                                 continue
13429                         portage.doebuild(ebuildpath, "info", pkgsettings["ROOT"],
13430                                 pkgsettings, debug=(settings.get("PORTAGE_DEBUG", "") == "1"),
13431                                 mydbapi=trees[settings["ROOT"]]["vartree"].dbapi,
13432                                 tree="vartree")
13433
13434 def action_search(root_config, myopts, myfiles, spinner):
13435         if not myfiles:
13436                 print "emerge: no search terms provided."
13437         else:
13438                 searchinstance = search(root_config,
13439                         spinner, "--searchdesc" in myopts,
13440                         "--quiet" not in myopts, "--usepkg" in myopts,
13441                         "--usepkgonly" in myopts)
13442                 for mysearch in myfiles:
13443                         try:
13444                                 searchinstance.execute(mysearch)
13445                         except re.error, comment:
13446                                 print "\n!!! Regular expression error in \"%s\": %s" % ( mysearch, comment )
13447                                 sys.exit(1)
13448                         searchinstance.output()
13449
13450 def action_depclean(settings, trees, ldpath_mtimes,
13451         myopts, action, myfiles, spinner):
13452         # Remove packages that are neither explicitly merged nor required as a
13453         # dependency of another package. The world file defines what counts as explicit.
13454
13455         # Global depclean or prune operations are not very safe when there are
13456         # missing dependencies since it's unknown how badly incomplete
13457         # the dependency graph is, and we might accidentally remove packages
13458         # that should have been pulled into the graph. On the other hand, it's
13459         # relatively safe to ignore missing deps when only asked to remove
13460         # specific packages.
13461         allow_missing_deps = len(myfiles) > 0
13462
13463         msg = []
13464         msg.append("Always study the list of packages to be cleaned for any obvious\n")
13465         msg.append("mistakes. Packages that are part of the world set will always\n")
13466         msg.append("be kept.  They can be manually added to this set with\n")
13467         msg.append(good("`emerge --noreplace <atom>`") + ".  Packages that are listed in\n")
13468         msg.append("package.provided (see portage(5)) will be removed by\n")
13469         msg.append("depclean, even if they are part of the world set.\n")
13470         msg.append("\n")
13471         msg.append("As a safety measure, depclean will not remove any packages\n")
13472         msg.append("unless *all* required dependencies have been resolved.  As a\n")
13473         msg.append("consequence, it is often necessary to run %s\n" % \
13474                 good("`emerge --update"))
13475         msg.append(good("--newuse --deep @system @world`") + \
13476                 " prior to depclean.\n")
13477
13478         if action == "depclean" and "--quiet" not in myopts and not myfiles:
13479                 portage.writemsg_stdout("\n")
13480                 for x in msg:
13481                         portage.writemsg_stdout(colorize("WARN", " * ") + x)
13482
13483         xterm_titles = "notitles" not in settings.features
13484         myroot = settings["ROOT"]
13485         root_config = trees[myroot]["root_config"]
13486         getSetAtoms = root_config.setconfig.getSetAtoms
13487         vardb = trees[myroot]["vartree"].dbapi
13488
13489         required_set_names = ("system", "world")
13490         required_sets = {}
13491         set_args = []
13492
13493         for s in required_set_names:
13494                 required_sets[s] = InternalPackageSet(
13495                         initial_atoms=getSetAtoms(s))
13496
13497
13498         # When removing packages, use a temporary version of world
13499         # which excludes packages that are intended to be eligible for
13500         # removal.
13501         world_temp_set = required_sets["world"]
13502         system_set = required_sets["system"]
13503
13504         if not system_set or not world_temp_set:
13505
13506                 if not system_set:
13507                         writemsg_level("!!! You have no system list.\n",
13508                                 level=logging.ERROR, noiselevel=-1)
13509
13510                 if not world_temp_set:
13511                         writemsg_level("!!! You have no world file.\n",
13512                                         level=logging.WARNING, noiselevel=-1)
13513
13514                 writemsg_level("!!! Proceeding is likely to " + \
13515                         "break your installation.\n",
13516                         level=logging.WARNING, noiselevel=-1)
13517                 if "--pretend" not in myopts:
13518                         countdown(int(settings["EMERGE_WARNING_DELAY"]), ">>> Depclean")
13519
13520         if action == "depclean":
13521                 emergelog(xterm_titles, " >>> depclean")
13522
13523         import textwrap
13524         args_set = InternalPackageSet()
13525         if myfiles:
13526                 for x in myfiles:
13527                         if not is_valid_package_atom(x):
13528                                 writemsg_level("!!! '%s' is not a valid package atom.\n" % x,
13529                                         level=logging.ERROR, noiselevel=-1)
13530                                 writemsg_level("!!! Please check ebuild(5) for full details.\n", level=logging.ERROR, noiselevel=-1)
13531                                 return
13532                         try:
13533                                 atom = portage.dep_expand(x, mydb=vardb, settings=settings)
13534                         except portage.exception.AmbiguousPackageName, e:
13535                                 msg = "The short ebuild name \"" + x + \
13536                                         "\" is ambiguous.  Please specify " + \
13537                                         "one of the following " + \
13538                                         "fully-qualified ebuild names instead:"
13539                                 for line in textwrap.wrap(msg, 70):
13540                                         writemsg_level("!!! %s\n" % (line,),
13541                                                 level=logging.ERROR, noiselevel=-1)
13542                                 for i in e[0]:
13543                                         writemsg_level("    %s\n" % colorize("INFORM", i),
13544                                                 level=logging.ERROR, noiselevel=-1)
13545                                 writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
13546                                 return
13547                         args_set.add(atom)
13548                 matched_packages = False
13549                 for x in args_set:
13550                         if vardb.match(x):
13551                                 matched_packages = True
13552                                 break
13553                 if not matched_packages:
13554                         writemsg_level(">>> No packages selected for removal by %s\n" % \
13555                                 action)
13556                         return
13557
13558         writemsg_level("\nCalculating dependencies  ")
13559         resolver_params = create_depgraph_params(myopts, "remove")
13560         resolver = depgraph(settings, trees, myopts, resolver_params, spinner)
13561         vardb = resolver.trees[myroot]["vartree"].dbapi
13562
13563         if action == "depclean":
13564
13565                 if args_set:
13566                         # Pull in everything that's installed but not matched
13567                         # by an argument atom since we don't want to clean any
13568                         # package if something depends on it.
13569
13570                         world_temp_set.clear()
13571                         for pkg in vardb:
13572                                 spinner.update()
13573
13574                                 try:
13575                                         if args_set.findAtomForPackage(pkg) is None:
13576                                                 world_temp_set.add("=" + pkg.cpv)
13577                                                 continue
13578                                 except portage.exception.InvalidDependString, e:
13579                                         show_invalid_depstring_notice(pkg,
13580                                                 pkg.metadata["PROVIDE"], str(e))
13581                                         del e
13582                                         world_temp_set.add("=" + pkg.cpv)
13583                                         continue
13584
13585         elif action == "prune":
13586
13587                 # Pull in everything that's installed since we don't want
13588                 # to prune a package if something depends on it.
13589                 world_temp_set.clear()
13590                 world_temp_set.update(vardb.cp_all())
13591
13592                 if not args_set:
13593
13594                         # Try to prune everything that's slotted.
13595                         for cp in vardb.cp_all():
13596                                 if len(vardb.cp_list(cp)) > 1:
13597                                         args_set.add(cp)
13598
13599                 # Remove atoms from world that match installed packages
13600                 # that are also matched by argument atoms, but do not remove
13601                 # them if they match the highest installed version.
13602                 for pkg in vardb:
13603                         spinner.update()
13604                         pkgs_for_cp = vardb.match_pkgs(pkg.cp)
13605                         if not pkgs_for_cp or pkg not in pkgs_for_cp:
13606                                 raise AssertionError("package expected in matches: " + \
13607                                         "cp = %s, cpv = %s matches = %s" % \
13608                                         (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
13609
13610                         highest_version = pkgs_for_cp[-1]
13611                         if pkg == highest_version:
13612                                 # pkg is the highest version
13613                                 world_temp_set.add("=" + pkg.cpv)
13614                                 continue
13615
13616                         if len(pkgs_for_cp) <= 1:
13617                                 raise AssertionError("more packages expected: " + \
13618                                         "cp = %s, cpv = %s matches = %s" % \
13619                                         (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
13620
13621                         try:
13622                                 if args_set.findAtomForPackage(pkg) is None:
13623                                         world_temp_set.add("=" + pkg.cpv)
13624                                         continue
13625                         except portage.exception.InvalidDependString, e:
13626                                 show_invalid_depstring_notice(pkg,
13627                                         pkg.metadata["PROVIDE"], str(e))
13628                                 del e
13629                                 world_temp_set.add("=" + pkg.cpv)
13630                                 continue
13631
13632         set_args = {}
13633         for s, package_set in required_sets.iteritems():
13634                 set_atom = SETPREFIX + s
13635                 set_arg = SetArg(arg=set_atom, set=package_set,
13636                         root_config=resolver.roots[myroot])
13637                 set_args[s] = set_arg
13638                 for atom in set_arg.set:
13639                         resolver._dep_stack.append(
13640                                 Dependency(atom=atom, root=myroot, parent=set_arg))
13641                         resolver.digraph.add(set_arg, None)
13642
13643         success = resolver._complete_graph()
13644         writemsg_level("\b\b... done!\n")
13645
13646         resolver.display_problems()
13647
13648         if not success:
13649                 return 1
13650
13651         def unresolved_deps():
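                      # Collect atoms of hard (non-SOFT) dependencies that installed
                      # packages could not satisfy; any such atom means the graph is
                      # incomplete and cleaning would be unsafe.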
13652
13653                 unresolvable = set()
13654                 for dep in resolver._initially_unsatisfied_deps:
13655                         if isinstance(dep.parent, Package) and \
13656                                 (dep.priority > UnmergeDepPriority.SOFT):
13657                                 unresolvable.add((dep.atom, dep.parent.cpv))
13658
13659                 if not unresolvable:
13660                         return False
13661
13662                 if unresolvable and not allow_missing_deps:
13663                         prefix = bad(" * ")
13664                         msg = []
13665                         msg.append("Dependencies could not be completely resolved due to")
13666                         msg.append("the following required packages not being installed:")
13667                         msg.append("")
13668                         for atom, parent in unresolvable:
13669                                 msg.append("  %s pulled in by:" % (atom,))
13670                                 msg.append("    %s" % (parent,))
13671                                 msg.append("")
13672                         msg.append("Have you forgotten to run " + \
13673                                 good("`emerge --update --newuse --deep @system @world`") + " prior")
13674                         msg.append(("to %s? It may be necessary to manually " + \
13675                                 "uninstall packages that no longer") % action)
13676                         msg.append("exist in the portage tree since " + \
13677                                 "it may not be possible to satisfy their")
13678                         msg.append("dependencies.  Also, be aware of " + \
13679                                 "the --with-bdeps option that is documented")
13680                         msg.append("in " + good("`man emerge`") + ".")
13681                         if action == "prune":
13682                                 msg.append("")
13683                                 msg.append("If you would like to ignore " + \
13684                                         "dependencies then use %s." % good("--nodeps"))
13685                         writemsg_level("".join("%s%s\n" % (prefix, line) for line in msg),
13686                                 level=logging.ERROR, noiselevel=-1)
13687                         return True
13688                 return False
13689
13690         if unresolved_deps():
13691                 return 1
13692
13693         graph = resolver.digraph.copy()
13694         required_pkgs_total = 0
13695         for node in graph:
13696                 if isinstance(node, Package):
13697                         required_pkgs_total += 1
13698
13699         def show_parents(child_node):
13700                 parent_nodes = graph.parent_nodes(child_node)
13701                 if not parent_nodes:
13702                         # With --prune, the highest version can be pulled in without any
13703                         # real parent since all installed packages are pulled in.  In that
13704                         # case there's nothing to show here.
13705                         return
13706                 parent_strs = []
13707                 for node in parent_nodes:
13708                         parent_strs.append(str(getattr(node, "cpv", node)))
13709                 parent_strs.sort()
13710                 msg = []
13711                 msg.append("  %s pulled in by:\n" % (child_node.cpv,))
13712                 for parent_str in parent_strs:
13713                         msg.append("    %s\n" % (parent_str,))
13714                 msg.append("\n")
13715                 portage.writemsg_stdout("".join(msg), noiselevel=-1)
13716
13717         def cmp_pkg_cpv(pkg1, pkg2):
13718                 """Sort Package instances by cpv."""
13719                 if pkg1.cpv > pkg2.cpv:
13720                         return 1
13721                 elif pkg1.cpv == pkg2.cpv:
13722                         return 0
13723                 else:
13724                         return -1
13725
13726         def create_cleanlist():
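                      # A package is selected for removal when it is not reachable in the
                      # completed dependency graph (and, when atoms were given or when
                      # pruning, it also matches one of the requested atoms).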
13727                 pkgs_to_remove = []
13728
13729                 if action == "depclean":
13730                         if args_set:
13731
13732                                 for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)):
13733                                         arg_atom = None
13734                                         try:
13735                                                 arg_atom = args_set.findAtomForPackage(pkg)
13736                                         except portage.exception.InvalidDependString:
13737                                                 # this error has already been displayed by now
13738                                                 continue
13739
13740                                         if arg_atom:
13741                                                 if pkg not in graph:
13742                                                         pkgs_to_remove.append(pkg)
13743                                                 elif "--verbose" in myopts:
13744                                                         show_parents(pkg)
13745
13746                         else:
13747                                 for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)):
13748                                         if pkg not in graph:
13749                                                 pkgs_to_remove.append(pkg)
13750                                         elif "--verbose" in myopts:
13751                                                 show_parents(pkg)
13752
13753                 elif action == "prune":
13754                         # Prune operates on all installed packages instead of world. The
13755                         # world set isn't a real reverse dependency, so don't display it as such.
13756                         graph.remove(set_args["world"])
13757
13758                         for atom in args_set:
13759                                 for pkg in vardb.match_pkgs(atom):
13760                                         if pkg not in graph:
13761                                                 pkgs_to_remove.append(pkg)
13762                                         elif "--verbose" in myopts:
13763                                                 show_parents(pkg)
13764
13765                 if not pkgs_to_remove:
13766                         writemsg_level(
13767                                 ">>> No packages selected for removal by %s\n" % action)
13768                         if "--verbose" not in myopts:
13769                                 writemsg_level(
13770                                         ">>> To see reverse dependencies, use %s\n" % \
13771                                                 good("--verbose"))
13772                         if action == "prune":
13773                                 writemsg_level(
13774                                         ">>> To ignore dependencies, use %s\n" % \
13775                                                 good("--nodeps"))
13776
13777                 return pkgs_to_remove
13778
13779         cleanlist = create_cleanlist()
13780
13781         if len(cleanlist):
13782                 clean_set = set(cleanlist)
13783
13784                 # Check if any of these packages are the sole providers of libraries
13785                 # with consumers that have not been selected for removal. If so, these
13786                 # packages and any dependencies need to be added to the graph.
13787                 real_vardb = trees[myroot]["vartree"].dbapi
13788                 linkmap = real_vardb.linkmap
13789                 liblist = linkmap.listLibraryObjects()
13790                 consumer_cache = {}
13791                 provider_cache = {}
13792                 soname_cache = {}
13793                 consumer_map = {}
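                      # consumer_cache and provider_cache memoize the expensive
                      # linkmap.findConsumers()/findProviders() lookups, soname_cache
                      # memoizes linkmap.getSoname(), and consumer_map records, per
                      # package slated for removal, the libraries it provides that
                      # still have consumers outside the clean set.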
13794
13795                 writemsg_level(">>> Checking for lib consumers...\n")
13796
13797                 for pkg in cleanlist:
13798                         pkg_dblink = real_vardb._dblink(pkg.cpv)
13799                         provided_libs = set()
13800
13801                         for lib in liblist:
13802                                 if pkg_dblink.isowner(lib, myroot):
13803                                         provided_libs.add(lib)
13804
13805                         if not provided_libs:
13806                                 continue
13807
13808                         consumers = {}
13809                         for lib in provided_libs:
13810                                 lib_consumers = consumer_cache.get(lib)
13811                                 if lib_consumers is None:
13812                                         lib_consumers = linkmap.findConsumers(lib)
13813                                         consumer_cache[lib] = lib_consumers
13814                                 if lib_consumers:
13815                                         consumers[lib] = lib_consumers
13816
13817                         if not consumers:
13818                                 continue
13819
13820                         for lib, lib_consumers in consumers.items():
13821                                 for consumer_file in list(lib_consumers):
13822                                         if pkg_dblink.isowner(consumer_file, myroot):
13823                                                 lib_consumers.remove(consumer_file)
13824                                 if not lib_consumers:
13825                                         del consumers[lib]
13826
13827                         if not consumers:
13828                                 continue
13829
13830                         for lib, lib_consumers in consumers.iteritems():
13831
13832                                 soname = soname_cache.get(lib)
13833                                 if soname is None:
13834                                         soname = linkmap.getSoname(lib)
13835                                         soname_cache[lib] = soname
13836
13837                                 consumer_providers = []
13838                                 for lib_consumer in lib_consumers:
13839                                         providers = provider_cache.get(lib_consumer)
13840                                         if providers is None:
13841                                                 providers = linkmap.findProviders(lib_consumer)
13842                                                 provider_cache[lib_consumer] = providers
13843                                         if soname not in providers:
13844                                                 # Why does this happen?
13845                                                 continue
13846                                         consumer_providers.append(
13847                                                 (lib_consumer, providers[soname]))
13848
13849                                 consumers[lib] = consumer_providers
13850
13851                         consumer_map[pkg] = consumers
13852
13853                 if consumer_map:
13854
13855                         search_files = set()
13856                         for consumers in consumer_map.itervalues():
13857                                 for lib, consumer_providers in consumers.iteritems():
13858                                         for lib_consumer, providers in consumer_providers:
13859                                                 search_files.add(lib_consumer)
13860                                                 search_files.update(providers)
13861
13862                         writemsg_level(">>> Assigning files to packages...\n")
13863                         file_owners = real_vardb._owners.getFileOwnerMap(search_files)
13864
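                              # A consumer only forces a package to be kept if no alternative
                              # provider of the same library survives outside the clean set;
                              # consumers that are themselves slated for removal are ignored.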
13865                         for pkg, consumers in consumer_map.items():
13866                                 for lib, consumer_providers in consumers.items():
13867                                         lib_consumers = set()
13868
13869                                         for lib_consumer, providers in consumer_providers:
13870                                                 owner_set = file_owners.get(lib_consumer)
13871                                                 provider_dblinks = set()
13872                                                 provider_pkgs = set()
13873
13874                                                 if len(providers) > 1:
13875                                                         for provider in providers:
13876                                                                 provider_set = file_owners.get(provider)
13877                                                                 if provider_set is not None:
13878                                                                         provider_dblinks.update(provider_set)
13879
13880                                                 if len(provider_dblinks) > 1:
13881                                                         for provider_dblink in provider_dblinks:
13882                                                                 pkg_key = ("installed", myroot,
13883                                                                         provider_dblink.mycpv, "nomerge")
13884                                                                 if pkg_key not in clean_set:
13885                                                                         provider_pkgs.add(vardb.get(pkg_key))
13886
13887                                                 if provider_pkgs:
13888                                                         continue
13889
13890                                                 if owner_set is not None:
13891                                                         lib_consumers.update(owner_set)
13892
13893                                         for consumer_dblink in list(lib_consumers):
13894                                                 if ("installed", myroot, consumer_dblink.mycpv,
13895                                                         "nomerge") in clean_set:
13896                                                         lib_consumers.remove(consumer_dblink)
13897                                                         continue
13898
13899                                         if lib_consumers:
13900                                                 consumers[lib] = lib_consumers
13901                                         else:
13902                                                 del consumers[lib]
13903                                 if not consumers:
13904                                         del consumer_map[pkg]
13905
13906                 if consumer_map:
13907                         # TODO: Implement a package set for rebuilding consumer packages.
13908
13909                         msg = "In order to avoid breakage of link level " + \
13910                                 "dependencies, one or more packages will not be removed. " + \
13911                                 "This can be solved by rebuilding " + \
13912                                 "the packages that pulled them in."
13913
13914                         prefix = bad(" * ")
13915                         from textwrap import wrap
13916                         writemsg_level("".join(prefix + "%s\n" % line for \
13917                                 line in wrap(msg, 70)), level=logging.WARNING, noiselevel=-1)
13918
13919                         msg = []
13920                         for pkg, consumers in consumer_map.iteritems():
13921                                 unique_consumers = set(chain(*consumers.values()))
13922                                 unique_consumers = sorted(consumer.mycpv \
13923                                         for consumer in unique_consumers)
13924                                 msg.append("")
13925                                 msg.append("  %s pulled in by:" % (pkg.cpv,))
13926                                 for consumer in unique_consumers:
13927                                         msg.append("    %s" % (consumer,))
13928                         msg.append("")
13929                         writemsg_level("".join(prefix + "%s\n" % line for line in msg),
13930                                 level=logging.WARNING, noiselevel=-1)
13931
13932                         # Add lib providers to the graph as children of lib consumers,
13933                         # and also add any dependencies pulled in by the provider.
13934                         writemsg_level(">>> Adding lib providers to graph...\n")
13935
13936                         for pkg, consumers in consumer_map.iteritems():
13937                                 for consumer_dblink in set(chain(*consumers.values())):
13938                                         consumer_pkg = vardb.get(("installed", myroot,
13939                                                 consumer_dblink.mycpv, "nomerge"))
13940                                         if not resolver._add_pkg(pkg,
13941                                                 Dependency(parent=consumer_pkg,
13942                                                 priority=UnmergeDepPriority(runtime=True),
13943                                                 root=pkg.root)):
13944                                                 resolver.display_problems()
13945                                                 return 1
13946
13947                         writemsg_level("\nCalculating dependencies  ")
13948                         success = resolver._complete_graph()
13949                         writemsg_level("\b\b... done!\n")
13950                         resolver.display_problems()
13951                         if not success:
13952                                 return 1
13953                         if unresolved_deps():
13954                                 return 1
13955
13956                         graph = resolver.digraph.copy()
13957                         required_pkgs_total = 0
13958                         for node in graph:
13959                                 if isinstance(node, Package):
13960                                         required_pkgs_total += 1
13961                         cleanlist = create_cleanlist()
13962                         if not cleanlist:
13963                                 return 0
13964                         clean_set = set(cleanlist)
13965
13966                 # Use a topological sort to create an unmerge order such that
13967                 # each package is unmerged before its dependencies. This is
13968                 # necessary to avoid breaking things that may need to run
13969                 # during pkg_prerm or pkg_postrm phases.
13970
13971                 # Create a new graph to account for dependencies between the
13972                 # packages being unmerged.
13973                 graph = digraph()
13974                 del cleanlist[:]
13975
13976                 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
13977                 runtime = UnmergeDepPriority(runtime=True)
13978                 runtime_post = UnmergeDepPriority(runtime_post=True)
13979                 buildtime = UnmergeDepPriority(buildtime=True)
13980                 priority_map = {
13981                         "RDEPEND": runtime,
13982                         "PDEPEND": runtime_post,
13983                         "DEPEND": buildtime,
13984                 }
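                      # When the cycle-breaking loop below has to ignore edges, it drops
                      # lower-priority (build-time) edges before runtime ones.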
13985
13986                 for node in clean_set:
13987                         graph.add(node, None)
13988                         mydeps = []
13989                         node_use = node.metadata["USE"].split()
13990                         for dep_type in dep_keys:
13991                                 depstr = node.metadata[dep_type]
13992                                 if not depstr:
13993                                         continue
13994                                 try:
13995                                         portage.dep._dep_check_strict = False
13996                                         success, atoms = portage.dep_check(depstr, None, settings,
13997                                                 myuse=node_use, trees=resolver._graph_trees,
13998                                                 myroot=myroot)
13999                                 finally:
14000                                         portage.dep._dep_check_strict = True
14001                                 if not success:
14002                                         # Ignore invalid deps of packages that will
14003                                         # be uninstalled anyway.
14004                                         continue
14005
14006                                 priority = priority_map[dep_type]
14007                                 for atom in atoms:
14008                                         if not isinstance(atom, portage.dep.Atom):
14009                                                 # Ignore invalid atoms returned from dep_check().
14010                                                 continue
14011                                         if atom.blocker:
14012                                                 continue
14013                                         matches = vardb.match_pkgs(atom)
14014                                         if not matches:
14015                                                 continue
14016                                         for child_node in matches:
14017                                                 if child_node in clean_set:
14018                                                         graph.add(child_node, node, priority=priority)
14019
14020                 ordered = True
14021                 if len(graph.order) == len(graph.root_nodes()):
14022                         # If there are no dependencies between packages
14023                         # let unmerge() group them by cat/pn.
14024                         ordered = False
14025                         cleanlist = [pkg.cpv for pkg in graph.order]
14026                 else:
14027                         # Order nodes from lowest to highest overall reference count for
14028                         # optimal root node selection.
14029                         node_refcounts = {}
14030                         for node in graph.order:
14031                                 node_refcounts[node] = len(graph.parent_nodes(node))
14032                         def cmp_reference_count(node1, node2):
14033                                 return node_refcounts[node1] - node_refcounts[node2]
14034                         graph.order.sort(key=cmp_sort_key(cmp_reference_count))
14035
14036                         ignore_priority_range = [None]
14037                         ignore_priority_range.extend(
14038                                 xrange(UnmergeDepPriority.MIN, UnmergeDepPriority.MAX + 1))
14039                         while not graph.empty():
14040                                 for ignore_priority in ignore_priority_range:
14041                                         nodes = graph.root_nodes(ignore_priority=ignore_priority)
14042                                         if nodes:
14043                                                 break
14044                                 if not nodes:
14045                                         raise AssertionError("no root nodes")
14046                                 if ignore_priority is not None:
14047                                         # Some deps have been dropped due to circular dependencies,
14048                                         # so only pop one node in order to minimize the number that
14049                                         # are dropped.
14050                                         del nodes[1:]
14051                                 for node in nodes:
14052                                         graph.remove(node)
14053                                         cleanlist.append(node.cpv)
14054
14055                 unmerge(root_config, myopts, "unmerge", cleanlist,
14056                         ldpath_mtimes, ordered=ordered)
14057
14058         if action == "prune":
14059                 return
14060
14061         if not cleanlist and "--quiet" in myopts:
14062                 return
14063
14064         print "Packages installed:   "+str(len(vardb.cpv_all()))
14065         print "Packages in world:    " + \
14066                 str(len(root_config.sets["world"].getAtoms()))
14067         print "Packages in system:   " + \
14068                 str(len(root_config.sets["system"].getAtoms()))
14069         print "Required packages:    "+str(required_pkgs_total)
14070         if "--pretend" in myopts:
14071                 print "Number to remove:     "+str(len(cleanlist))
14072         else:
14073                 print "Number removed:       "+str(len(cleanlist))
14074
14075 def resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
14076         """
14077         Construct a depgraph for the given resume list. This will raise
14078         PackageNotFound or depgraph.UnsatisfiedResumeDep when necessary.
14079         @rtype: tuple
14080         @returns: (success, depgraph, dropped_tasks)
14081         """
14082         skip_masked = True
14083         skip_unsatisfied = True
14084         mergelist = mtimedb["resume"]["mergelist"]
14085         dropped_tasks = set()
14086         while True:
14087                 mydepgraph = depgraph(settings, trees,
14088                         myopts, myparams, spinner)
14089                 try:
14090                         success = mydepgraph.loadResumeCommand(mtimedb["resume"],
14091                                 skip_masked=skip_masked)
14092                 except depgraph.UnsatisfiedResumeDep, e:
14093                         if not skip_unsatisfied:
14094                                 raise
14095
14096                         graph = mydepgraph.digraph
14097                         unsatisfied_parents = dict((dep.parent, dep.parent) \
14098                                 for dep in e.value)
14099                         traversed_nodes = set()
14100                         unsatisfied_stack = list(unsatisfied_parents)
14101                         while unsatisfied_stack:
14102                                 pkg = unsatisfied_stack.pop()
14103                                 if pkg in traversed_nodes:
14104                                         continue
14105                                 traversed_nodes.add(pkg)
14106
14107                                 # If this package was pulled in by a parent
14108                                 # package scheduled for merge, removing this
14109                                 # package may cause the parent package's
14110                                 # dependency to become unsatisfied.
14111                                 for parent_node in graph.parent_nodes(pkg):
14112                                         if not isinstance(parent_node, Package) \
14113                                                 or parent_node.operation not in ("merge", "nomerge"):
14114                                                 continue
14115                                         unsatisfied = \
14116                                                 graph.child_nodes(parent_node,
14117                                                 ignore_priority=DepPrioritySatisfiedRange.ignore_soft)
14118                                         if pkg in unsatisfied:
14119                                                 unsatisfied_parents[parent_node] = parent_node
14120                                                 unsatisfied_stack.append(parent_node)
14121
14122                         pruned_mergelist = []
14123                         for x in mergelist:
14124                                 if isinstance(x, list) and \
14125                                         tuple(x) not in unsatisfied_parents:
14126                                         pruned_mergelist.append(x)
14127
14128                         # If the mergelist doesn't shrink then this loop is infinite.
14129                         if len(pruned_mergelist) == len(mergelist):
14130                                 # This happens if a package can't be dropped because
14131                                 # it's already installed, but it has unsatisfied PDEPEND.
14132                                 raise
14133                         mergelist[:] = pruned_mergelist
14134
14135                         # Exclude installed packages that have been removed from the graph due
14136                         # to failure to build/install runtime dependencies after the dependent
14137                         # package has already been installed.
14138                         dropped_tasks.update(pkg for pkg in \
14139                                 unsatisfied_parents if pkg.operation != "nomerge")
14140                         mydepgraph.break_refs(unsatisfied_parents)
14141
14142                         del e, graph, traversed_nodes, \
14143                                 unsatisfied_parents, unsatisfied_stack
14144                         continue
14145                 else:
14146                         break
14147         return (success, mydepgraph, dropped_tasks)
14148
14149 def action_build(settings, trees, mtimedb,
14150         myopts, myaction, myfiles, spinner):
14151
14152         # validate the state of the resume data
14153         # so that we can make assumptions later.
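        # Resume data is discarded entirely if its structure is wrong or if it
        # refers to a ROOT that is absent from the current configuration.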
14154         for k in ("resume", "resume_backup"):
14155                 if k not in mtimedb:
14156                         continue
14157                 resume_data = mtimedb[k]
14158                 if not isinstance(resume_data, dict):
14159                         del mtimedb[k]
14160                         continue
14161                 mergelist = resume_data.get("mergelist")
14162                 if not isinstance(mergelist, list):
14163                         del mtimedb[k]
14164                         continue
14165                 for x in mergelist:
14166                         if not (isinstance(x, list) and len(x) == 4):
14167                                 continue
14168                         pkg_type, pkg_root, pkg_key, pkg_action = x
14169                         if pkg_root not in trees:
14170                                 # Current $ROOT setting differs,
14171                                 # so the list must be stale.
14172                                 mergelist = None
14173                                 break
14174                 if not mergelist:
14175                         del mtimedb[k]
14176                         continue
14177                 resume_opts = resume_data.get("myopts")
14178                 if not isinstance(resume_opts, (dict, list)):
14179                         del mtimedb[k]
14180                         continue
14181                 favorites = resume_data.get("favorites")
14182                 if not isinstance(favorites, list):
14183                         del mtimedb[k]
14184                         continue
14185
14186         resume = False
14187         if "--resume" in myopts and \
14188                 ("resume" in mtimedb or
14189                 "resume_backup" in mtimedb):
14190                 resume = True
14191                 if "resume" not in mtimedb:
14192                         mtimedb["resume"] = mtimedb["resume_backup"]
14193                         del mtimedb["resume_backup"]
14194                         mtimedb.commit()
14195                 # "myopts" is a list for backward compatibility.
14196                 resume_opts = mtimedb["resume"].get("myopts", [])
14197                 if isinstance(resume_opts, list):
14198                         resume_opts = dict((k,True) for k in resume_opts)
14199                 for opt in ("--ask", "--color", "--skipfirst", "--tree"):
14200                         resume_opts.pop(opt, None)
14201                 myopts.update(resume_opts)
14202
14203                 if "--debug" in myopts:
14204                         writemsg_level("myopts %s\n" % (myopts,))
14205
14206                 # Adjust config according to options of the command being resumed.
14207                 for myroot in trees:
14208                         mysettings =  trees[myroot]["vartree"].settings
14209                         mysettings.unlock()
14210                         adjust_config(myopts, mysettings)
14211                         mysettings.lock()
14212                         del myroot, mysettings
14213
14214         ldpath_mtimes = mtimedb["ldpath"]
14215         favorites=[]
14216         merge_count = 0
14217         buildpkgonly = "--buildpkgonly" in myopts
14218         pretend = "--pretend" in myopts
14219         fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
14220         ask = "--ask" in myopts
14221         nodeps = "--nodeps" in myopts
14222         oneshot = "--oneshot" in myopts or "--onlydeps" in myopts
14223         tree = "--tree" in myopts
14224         if nodeps and tree:
14225                 tree = False
14226                 del myopts["--tree"]
14227                 portage.writemsg(colorize("WARN", " * ") + \
14228                         "--tree is broken with --nodeps. Disabling...\n")
14229         debug = "--debug" in myopts
14230         verbose = "--verbose" in myopts
14231         quiet = "--quiet" in myopts
14232         if pretend or fetchonly:
14233                 # make the mtimedb readonly
14234                 mtimedb.filename = None
14235         if "--digest" in myopts:
14236                 msg = "The --digest option can prevent corruption from being" + \
14237                         " noticed. The `repoman manifest` command is the preferred" + \
14238                         " way to generate manifests and it is capable of doing an" + \
14239                         " entire repository or category at once."
14240                 prefix = bad(" * ")
14241                 writemsg(prefix + "\n")
14242                 from textwrap import wrap
14243                 for line in wrap(msg, 72):
14244                         writemsg("%s%s\n" % (prefix, line))
14245                 writemsg(prefix + "\n")
14246
14247         if "--quiet" not in myopts and \
14248                 ("--pretend" in myopts or "--ask" in myopts or \
14249                 "--tree" in myopts or "--verbose" in myopts):
14250                 action = ""
14251                 if "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
14252                         action = "fetched"
14253                 elif "--buildpkgonly" in myopts:
14254                         action = "built"
14255                 else:
14256                         action = "merged"
14257                 if "--tree" in myopts and action != "fetched": # Tree doesn't work with fetching
14258                         print
14259                         print darkgreen("These are the packages that would be %s, in reverse order:") % action
14260                         print
14261                 else:
14262                         print
14263                         print darkgreen("These are the packages that would be %s, in order:") % action
14264                         print
14265
14266         show_spinner = "--quiet" not in myopts and "--nodeps" not in myopts
14267         if not show_spinner:
14268                 spinner.update = spinner.update_quiet
14269
14270         if resume:
14271                 favorites = mtimedb["resume"].get("favorites")
14272                 if not isinstance(favorites, list):
14273                         favorites = []
14274
14275                 if show_spinner:
14276                         print "Calculating dependencies  ",
14277                 myparams = create_depgraph_params(myopts, myaction)
14278
14279                 resume_data = mtimedb["resume"]
14280                 mergelist = resume_data["mergelist"]
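                      # --skipfirst: drop the first pending merge task from the
                      # resume list (typically the package whose build previously
                      # failed) before rebuilding the dependency graph.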
14281                 if mergelist and "--skipfirst" in myopts:
14282                         for i, task in enumerate(mergelist):
14283                                 if isinstance(task, list) and \
14284                                         task and task[-1] == "merge":
14285                                         del mergelist[i]
14286                                         break
14287
14288                 success = False
14289                 mydepgraph = None
14290                 try:
14291                         success, mydepgraph, dropped_tasks = resume_depgraph(
14292                                 settings, trees, mtimedb, myopts, myparams, spinner)
14293                 except (portage.exception.PackageNotFound,
14294                         depgraph.UnsatisfiedResumeDep), e:
14295                         if isinstance(e, depgraph.UnsatisfiedResumeDep):
14296                                 mydepgraph = e.depgraph
14297                         if show_spinner:
14298                                 print
14299                         from textwrap import wrap
14300                         from portage.output import EOutput
14301                         out = EOutput()
14302
14303                         resume_data = mtimedb["resume"]
14304                         mergelist = resume_data.get("mergelist")
14305                         if not isinstance(mergelist, list):
14306                                 mergelist = []
14307                         if mergelist and (debug or (verbose and not quiet)):
14308                                 out.eerror("Invalid resume list:")
14309                                 out.eerror("")
14310                                 indent = "  "
14311                                 for task in mergelist:
14312                                         if isinstance(task, list):
14313                                                 out.eerror(indent + str(tuple(task)))
14314                                 out.eerror("")
14315
14316                         if isinstance(e, depgraph.UnsatisfiedResumeDep):
14317                                 out.eerror("One or more packages are either masked or " + \
14318                                         "have missing dependencies:")
14319                                 out.eerror("")
14320                                 indent = "  "
14321                                 for dep in e.value:
14322                                         if dep.atom is None:
14323                                                 out.eerror(indent + "Masked package:")
14324                                                 out.eerror(2 * indent + str(dep.parent))
14325                                                 out.eerror("")
14326                                         else:
14327                                                 out.eerror(indent + str(dep.atom) + " pulled in by:")
14328                                                 out.eerror(2 * indent + str(dep.parent))
14329                                                 out.eerror("")
14330                                 msg = "The resume list contains packages " + \
14331                                         "that are either masked or have " + \
14332                                         "unsatisfied dependencies. " + \
14333                                         "Please restart/continue " + \
14334                                         "the operation manually, or use --skipfirst " + \
14335                                         "to skip the first package in the list and " + \
14336                                         "any other packages that may be " + \
14337                                         "masked or have missing dependencies."
14338                                 for line in wrap(msg, 72):
14339                                         out.eerror(line)
14340                         elif isinstance(e, portage.exception.PackageNotFound):
14341                                 out.eerror("An expected package is " + \
14342                                         "not available: %s" % str(e))
14343                                 out.eerror("")
14344                                 msg = "The resume list contains one or more " + \
14345                                         "packages that are no longer " + \
14346                                         "available. Please restart/continue " + \
14347                                         "the operation manually."
14348                                 for line in wrap(msg, 72):
14349                                         out.eerror(line)
14350                 else:
14351                         if show_spinner:
14352                                 print "\b\b... done!"
14353
14354                 if success:
14355                         if dropped_tasks:
14356                                 portage.writemsg("!!! One or more packages have been " + \
14357                                         "dropped due to\n" + \
14358                                         "!!! masking or unsatisfied dependencies:\n\n",
14359                                         noiselevel=-1)
14360                                 for task in dropped_tasks:
14361                                         portage.writemsg("  " + str(task) + "\n", noiselevel=-1)
14362                                 portage.writemsg("\n", noiselevel=-1)
14363                         del dropped_tasks
14364                 else:
14365                         if mydepgraph is not None:
14366                                 mydepgraph.display_problems()
14367                         if not (ask or pretend):
14368                                 # delete the current list and also the backup
14369                                 # since it's probably stale too.
14370                                 for k in ("resume", "resume_backup"):
14371                                         mtimedb.pop(k, None)
14372                                 mtimedb.commit()
14373
14374                         return 1
14375         else:
14376                 if ("--resume" in myopts):
14377                         print darkgreen("emerge: It seems we have nothing to resume...")
14378                         return os.EX_OK
14379
14380                 myparams = create_depgraph_params(myopts, myaction)
14381                 if "--quiet" not in myopts and "--nodeps" not in myopts:
14382                         print "Calculating dependencies  ",
14383                         sys.stdout.flush()
14384                 mydepgraph = depgraph(settings, trees, myopts, myparams, spinner)
14385                 try:
14386                         retval, favorites = mydepgraph.select_files(myfiles)
14387                 except portage.exception.PackageNotFound, e:
14388                         portage.writemsg("\n!!! %s\n" % str(e), noiselevel=-1)
14389                         return 1
14390                 except portage.exception.PackageSetNotFound, e:
14391                         root_config = trees[settings["ROOT"]]["root_config"]
14392                         display_missing_pkg_set(root_config, e.value)
14393                         return 1
14394                 if show_spinner:
14395                         print "\b\b... done!"
14396                 if not retval:
14397                         mydepgraph.display_problems()
14398                         return 1
14399
14400         if "--pretend" not in myopts and \
14401                 ("--ask" in myopts or "--tree" in myopts or \
14402                 "--verbose" in myopts) and \
14403                 not ("--quiet" in myopts and "--ask" not in myopts):
14404                 if "--resume" in myopts:
14405                         mymergelist = mydepgraph.altlist()
14406                         if len(mymergelist) == 0:
14407                                 print colorize("INFORM", "emerge: It seems we have nothing to resume...")
14408                                 return os.EX_OK
14409                         favorites = mtimedb["resume"]["favorites"]
14410                         retval = mydepgraph.display(
14411                                 mydepgraph.altlist(reversed=tree),
14412                                 favorites=favorites)
14413                         mydepgraph.display_problems()
14414                         if retval != os.EX_OK:
14415                                 return retval
14416                         prompt="Would you like to resume merging these packages?"
14417                 else:
14418                         retval = mydepgraph.display(
14419                                 mydepgraph.altlist(reversed=("--tree" in myopts)),
14420                                 favorites=favorites)
14421                         mydepgraph.display_problems()
14422                         if retval != os.EX_OK:
14423                                 return retval
14424                         mergecount=0
14425                         for x in mydepgraph.altlist():
14426                                 if isinstance(x, Package) and x.operation == "merge":
14427                                         mergecount += 1
14428
14429                         if mergecount==0:
14430                                 sets = trees[settings["ROOT"]]["root_config"].sets
14431                                 world_candidates = None
14432                                 if "--noreplace" in myopts and \
14433                                         not oneshot and favorites:
14434                                         # Sets that are not world candidates are filtered
14435                                         # out here since the favorites list needs to be
14436                                         # complete for depgraph.loadResumeCommand() to
14437                                         # operate correctly.
14438                                         world_candidates = [x for x in favorites \
14439                                                 if not (x.startswith(SETPREFIX) and \
14440                                                 not sets[x[1:]].world_candidate)]
14441                                 if "--noreplace" in myopts and \
14442                                         not oneshot and world_candidates:
14443                                         print
14444                                         for x in world_candidates:
14445                                                 print " %s %s" % (good("*"), x)
14446                                         prompt="Would you like to add these packages to your world favorites?"
14447                                 elif "yes" == settings.get("AUTOCLEAN"):
14448                                         prompt="Nothing to merge; would you like to auto-clean packages?"
14449                                 else:
14450                                         print
14451                                         print "Nothing to merge; quitting."
14452                                         print
14453                                         return os.EX_OK
14454                         elif "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
14455                                 prompt="Would you like to fetch the source files for these packages?"
14456                         else:
14457                                 prompt="Would you like to merge these packages?"
14458                 print
14459                 if "--ask" in myopts and userquery(prompt) == "No":
14460                         print
14461                         print "Quitting."
14462                         print
14463                         return os.EX_OK
14464                 # Don't ask again (e.g. when auto-cleaning packages after merge)
14465                 myopts.pop("--ask", None)
14466
14467         if ("--pretend" in myopts) and not ("--fetchonly" in myopts or "--fetch-all-uri" in myopts):
14468                 if ("--resume" in myopts):
14469                         mymergelist = mydepgraph.altlist()
14470                         if len(mymergelist) == 0:
14471                                 print colorize("INFORM", "emerge: It seems we have nothing to resume...")
14472                                 return os.EX_OK
14473                         favorites = mtimedb["resume"]["favorites"]
14474                         retval = mydepgraph.display(
14475                                 mydepgraph.altlist(reversed=tree),
14476                                 favorites=favorites)
14477                         mydepgraph.display_problems()
14478                         if retval != os.EX_OK:
14479                                 return retval
14480                 else:
14481                         retval = mydepgraph.display(
14482                                 mydepgraph.altlist(reversed=("--tree" in myopts)),
14483                                 favorites=favorites)
14484                         mydepgraph.display_problems()
14485                         if retval != os.EX_OK:
14486                                 return retval
14487                         if "--buildpkgonly" in myopts:
14488                                 graph_copy = mydepgraph.digraph.clone()
14489                                 removed_nodes = set()
14490                                 for node in list(graph_copy.order):
14491                                         if not isinstance(node, Package) or \
14492                                                 node.operation == "nomerge":
14493                                                 removed_nodes.add(node)
14494                                 graph_copy.difference_update(removed_nodes)
14495                                 if not graph_copy.hasallzeros(ignore_priority = \
14496                                         DepPrioritySatisfiedRange.ignore_medium):
14497                                         print "\n!!! --buildpkgonly requires all dependencies to be merged."
14498                                         print "!!! You have to merge the dependencies before you can build this package.\n"
14499                                         return 1
14500         else:
14501                 if "--buildpkgonly" in myopts:
14502                         graph_copy = mydepgraph.digraph.clone()
14503                         removed_nodes = set()
14504                         for node in list(graph_copy.order):
14505                                 if not isinstance(node, Package) or \
14506                                         node.operation == "nomerge":
14507                                         removed_nodes.add(node)
14508                         graph_copy.difference_update(removed_nodes)
14509                         if not graph_copy.hasallzeros(ignore_priority = \
14510                                 DepPrioritySatisfiedRange.ignore_medium):
14511                                 print "\n!!! --buildpkgonly requires all dependencies to be merged."
14512                                 print "!!! Cannot merge requested packages. Merge deps and try again.\n"
14513                                 return 1
14514
14515                 if ("--resume" in myopts):
14516                         favorites=mtimedb["resume"]["favorites"]
14517                         mymergelist = mydepgraph.altlist()
14518                         mydepgraph.break_refs(mymergelist)
14519                         mergetask = Scheduler(settings, trees, mtimedb, myopts,
14520                                 spinner, mymergelist, favorites, mydepgraph.schedulerGraph())
14521                         del mydepgraph, mymergelist
14522                         clear_caches(trees)
14523
14524                         retval = mergetask.merge()
14525                         merge_count = mergetask.curval
14526                 else:
14527                         if "resume" in mtimedb and \
14528                         "mergelist" in mtimedb["resume"] and \
14529                         len(mtimedb["resume"]["mergelist"]) > 1:
14530                                 mtimedb["resume_backup"] = mtimedb["resume"]
14531                                 del mtimedb["resume"]
14532                                 mtimedb.commit()
14533                         mtimedb["resume"]={}
14534                         # Stored as a dict starting with portage-2.1.6_rc1, and supported
14535                         # by >=portage-2.1.3_rc8. Versions <portage-2.1.3_rc8 only support
14536                         # a list type for options.
14537                         mtimedb["resume"]["myopts"] = myopts.copy()
14538
14539                         # Convert Atom instances to plain str.
14540                         mtimedb["resume"]["favorites"] = [str(x) for x in favorites]
14541
14542                         if ("--digest" in myopts) and not ("--fetchonly" in myopts or "--fetch-all-uri" in myopts):
14543                                 for pkgline in mydepgraph.altlist():
14544                                         if pkgline[0]=="ebuild" and pkgline[3]=="merge":
14545                                                 y = trees[pkgline[1]]["porttree"].dbapi.findname(pkgline[2])
14546                                                 tmpsettings = portage.config(clone=settings)
14547                                                 edebug = 0
14548                                                 if settings.get("PORTAGE_DEBUG", "") == "1":
14549                                                         edebug = 1
14550                                                 retval = portage.doebuild(
14551                                                         y, "digest", settings["ROOT"], tmpsettings, edebug,
14552                                                         ("--pretend" in myopts),
14553                                                         mydbapi=trees[pkgline[1]]["porttree"].dbapi,
14554                                                         tree="porttree")
14555
14556                         pkglist = mydepgraph.altlist()
14557                         mydepgraph.saveNomergeFavorites()
14558                         mydepgraph.break_refs(pkglist)
14559                         mergetask = Scheduler(settings, trees, mtimedb, myopts,
14560                                 spinner, pkglist, favorites, mydepgraph.schedulerGraph())
14561                         del mydepgraph, pkglist
14562                         clear_caches(trees)
14563
14564                         retval = mergetask.merge()
14565                         merge_count = mergetask.curval
14566
14567                 if retval == os.EX_OK and not (buildpkgonly or fetchonly or pretend):
14568                         if "yes" == settings.get("AUTOCLEAN"):
14569                                 portage.writemsg_stdout(">>> Auto-cleaning packages...\n")
14570                                 unmerge(trees[settings["ROOT"]]["root_config"],
14571                                         myopts, "clean", [],
14572                                         ldpath_mtimes, autoclean=1)
14573                         else:
14574                                 portage.writemsg_stdout(colorize("WARN", "WARNING:")
14575                                         + " AUTOCLEAN is disabled.  This can cause serious"
14576                                         + " problems due to overlapping packages.\n")
14577                         trees[settings["ROOT"]]["vartree"].dbapi.plib_registry.pruneNonExisting()
14578
14579                 return retval
14580
14581 def multiple_actions(action1, action2):
14582         sys.stderr.write("\n!!! Multiple actions requested... Please choose one only.\n")
14583         sys.stderr.write("!!! '%s' or '%s'\n\n" % (action1, action2))
14584         sys.exit(1)
14585
14586 def insert_optional_args(args):
14587         """
14588         Parse optional arguments and insert a value if one has
14589         not been provided. This is done before feeding the args
14590         to the optparse parser since that parser does not support
14591         this feature natively.
14592         """
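              # For example, ["-j4", "foo"] becomes ["--jobs", "4", "foo"], and a
              # lone "-j" (with no count following) becomes ["--jobs", "True"];
              # that "True" placeholder is interpreted later by parse_opts() as
              # an unlimited job count.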
14593
14594         new_args = []
14595         jobs_opts = ("-j", "--jobs")
14596         arg_stack = args[:]
14597         arg_stack.reverse()
14598         while arg_stack:
14599                 arg = arg_stack.pop()
14600
14601                 short_job_opt = bool("j" in arg and arg[:1] == "-" and arg[:2] != "--")
14602                 if not (short_job_opt or arg in jobs_opts):
14603                         new_args.append(arg)
14604                         continue
14605
14606                 # Insert an explicit --jobs token (plus a placeholder
14607                 # value when no count was given) to satisfy optparse.
14608
14609                 new_args.append("--jobs")
14610                 job_count = None
14611                 saved_opts = None
14612                 if short_job_opt and len(arg) > 2:
14613                         if arg[:2] == "-j":
14614                                 try:
14615                                         job_count = int(arg[2:])
14616                                 except ValueError:
14617                                         saved_opts = arg[2:]
14618                         else:
14619                                 job_count = "True"
14620                                 saved_opts = arg[1:].replace("j", "")
14621
14622                 if job_count is None and arg_stack:
14623                         try:
14624                                 job_count = int(arg_stack[-1])
14625                         except ValueError:
14626                                 pass
14627                         else:
14628                                 # Discard the job count from the stack
14629                                 # since we're consuming it here.
14630                                 arg_stack.pop()
14631
14632                 if job_count is None:
14633                         # unlimited number of jobs
14634                         new_args.append("True")
14635                 else:
14636                         new_args.append(str(job_count))
14637
14638                 if saved_opts is not None:
14639                         new_args.append("-" + saved_opts)
14640
14641         return new_args
14642
14643 def parse_opts(tmpcmdline, silent=False):
14644         myaction=None
14645         myopts = {}
14646         myfiles=[]
14647
14648         global actions, options, shortmapping
14649
14650         longopt_aliases = {"--cols":"--columns", "--skip-first":"--skipfirst"}
14651         argument_options = {
14652                 "--config-root": {
14653                         "help":"specify the location for portage configuration files",
14654                         "action":"store"
14655                 },
14656                 "--color": {
14657                         "help":"enable or disable color output",
14658                         "type":"choice",
14659                         "choices":("y", "n")
14660                 },
14661
14662                 "--jobs": {
14663
14664                         "help"   : "Specifies the number of packages to build " + \
14665                                 "simultaneously.",
14666
14667                         "action" : "store"
14668                 },
14669
14670                 "--load-average": {
14671
14672                         "help"   :"Specifies that no new builds should be started " + \
14673                                 "if there are other builds running and the load average " + \
14674                                 "is at least LOAD (a floating-point number).",
14675
14676                         "action" : "store"
14677                 },
14678
14679                 "--with-bdeps": {
14680                         "help":"include unnecessary build time dependencies",
14681                         "type":"choice",
14682                         "choices":("y", "n")
14683                 },
14684                 "--reinstall": {
14685                         "help":"specify conditions to trigger package reinstallation",
14686                         "type":"choice",
14687                         "choices":["changed-use"]
14688                 }
14689         }
14690
14691         from optparse import OptionParser
14692         parser = OptionParser()
14693         if parser.has_option("--help"):
14694                 parser.remove_option("--help")
14695
14696         for action_opt in actions:
14697                 parser.add_option("--" + action_opt, action="store_true",
14698                         dest=action_opt.replace("-", "_"), default=False)
14699         for myopt in options:
14700                 parser.add_option(myopt, action="store_true",
14701                         dest=myopt.lstrip("--").replace("-", "_"), default=False)
14702         for shortopt, longopt in shortmapping.iteritems():
14703                 parser.add_option("-" + shortopt, action="store_true",
14704                         dest=longopt.lstrip("--").replace("-", "_"), default=False)
14705         for myalias, myopt in longopt_aliases.iteritems():
14706                 parser.add_option(myalias, action="store_true",
14707                         dest=myopt.lstrip("--").replace("-", "_"), default=False)
14708
14709         for myopt, kwargs in argument_options.iteritems():
14710                 parser.add_option(myopt,
14711                         dest=myopt.lstrip("--").replace("-", "_"), **kwargs)
14712
14713         tmpcmdline = insert_optional_args(tmpcmdline)
14714
14715         myoptions, myargs = parser.parse_args(args=tmpcmdline)
14716
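              # A bare --jobs arrives as the string "True" (see
              # insert_optional_args); translate it to the boolean True, and
              # treat non-positive counts as invalid.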
14717         if myoptions.jobs:
14718                 jobs = None
14719                 if myoptions.jobs == "True":
14720                         jobs = True
14721                 else:
14722                         try:
14723                                 jobs = int(myoptions.jobs)
14724                         except ValueError:
14725                                 jobs = -1
14726
14727                 if jobs is not True and \
14728                         jobs < 1:
14729                         jobs = None
14730                         if not silent:
14731                                 writemsg("!!! Invalid --jobs parameter: '%s'\n" % \
14732                                         (myoptions.jobs,), noiselevel=-1)
14733
14734                 myoptions.jobs = jobs
14735
14736         if myoptions.load_average:
14737                 try:
14738                         load_average = float(myoptions.load_average)
14739                 except ValueError:
14740                         load_average = 0.0
14741
14742                 if load_average <= 0.0:
14743                         load_average = None
14744                         if not silent:
14745                                 writemsg("!!! Invalid --load-average parameter: '%s'\n" % \
14746                                         (myoptions.load_average,), noiselevel=-1)
14747
14748                 myoptions.load_average = load_average
14749
14750         for myopt in options:
14751                 v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"))
14752                 if v:
14753                         myopts[myopt] = True
14754
14755         for myopt in argument_options:
14756                 v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"), None)
14757                 if v is not None:
14758                         myopts[myopt] = v
14759
14760         for action_opt in actions:
14761                 v = getattr(myoptions, action_opt.replace("-", "_"))
14762                 if v:
14763                         if myaction:
14764                                 multiple_actions(myaction, action_opt)
14765                                 sys.exit(1)
14766                         myaction = action_opt
14767
14768         myfiles += myargs
14769
14770         return myaction, myopts, myfiles
14771
14772 def validate_ebuild_environment(trees):
14773         for myroot in trees:
14774                 settings = trees[myroot]["vartree"].settings
14775                 settings.validate()
14776
14777 def clear_caches(trees):
14778         for d in trees.itervalues():
14779                 d["porttree"].dbapi.melt()
14780                 d["porttree"].dbapi._aux_cache.clear()
14781                 d["bintree"].dbapi._aux_cache.clear()
14782                 d["bintree"].dbapi._clear_cache()
14783                 d["vartree"].dbapi.linkmap._clear_cache()
14784         portage.dircache.clear()
14785         gc.collect()
14786
14787 def load_emerge_config(trees=None):
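              # Build the (settings, trees, mtimedb) triple used throughout emerge,
              # honoring PORTAGE_CONFIGROOT and ROOT from the environment when set.
              # The returned settings correspond to the target ROOT, falling back
              # to "/" when no other root is configured.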
14788         kwargs = {}
14789         for k, envvar in (("config_root", "PORTAGE_CONFIGROOT"), ("target_root", "ROOT")):
14790                 v = os.environ.get(envvar, None)
14791                 if v and v.strip():
14792                         kwargs[k] = v
14793         trees = portage.create_trees(trees=trees, **kwargs)
14794
14795         for root, root_trees in trees.iteritems():
14796                 settings = root_trees["vartree"].settings
14797                 setconfig = load_default_config(settings, root_trees)
14798                 root_trees["root_config"] = RootConfig(settings, root_trees, setconfig)
14799
14800         settings = trees["/"]["vartree"].settings
14801
14802         for myroot in trees:
14803                 if myroot != "/":
14804                         settings = trees[myroot]["vartree"].settings
14805                         break
14806
14807         mtimedbfile = os.path.join("/", portage.CACHE_PATH.lstrip(os.path.sep), "mtimedb")
14808         mtimedb = portage.MtimeDB(mtimedbfile)
14809         
14810         return settings, trees, mtimedb
14811
14812 def adjust_config(myopts, settings):
14813         """Make emerge specific adjustments to the config."""
14814
14815         # To enhance usability, make some vars case insensitive by forcing them to
14816         # lower case.
14817         for myvar in ("AUTOCLEAN", "NOCOLOR"):
14818                 if myvar in settings:
14819                         settings[myvar] = settings[myvar].lower()
14820                         settings.backup_changes(myvar)
14821         del myvar
14822
14823         # Kill noauto as it will break merges otherwise.
14824         if "noauto" in settings.features:
14825                 while "noauto" in settings.features:
14826                         settings.features.remove("noauto")
14827                 settings["FEATURES"] = " ".join(settings.features)
14828                 settings.backup_changes("FEATURES")
14829
14830         CLEAN_DELAY = 5
14831         try:
14832                 CLEAN_DELAY = int(settings.get("CLEAN_DELAY", str(CLEAN_DELAY)))
14833         except ValueError, e:
14834                 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
14835                 portage.writemsg("!!! Unable to parse integer: CLEAN_DELAY='%s'\n" % \
14836                         settings["CLEAN_DELAY"], noiselevel=-1)
14837         settings["CLEAN_DELAY"] = str(CLEAN_DELAY)
14838         settings.backup_changes("CLEAN_DELAY")
14839
14840         EMERGE_WARNING_DELAY = 10
14841         try:
14842                 EMERGE_WARNING_DELAY = int(settings.get(
14843                         "EMERGE_WARNING_DELAY", str(EMERGE_WARNING_DELAY)))
14844         except ValueError, e:
14845                 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
14846                 portage.writemsg("!!! Unable to parse integer: EMERGE_WARNING_DELAY='%s'\n" % \
14847                         settings["EMERGE_WARNING_DELAY"], noiselevel=-1)
14848         settings["EMERGE_WARNING_DELAY"] = str(EMERGE_WARNING_DELAY)
14849         settings.backup_changes("EMERGE_WARNING_DELAY")
14850
14851         if "--quiet" in myopts:
14852                 settings["PORTAGE_QUIET"]="1"
14853                 settings.backup_changes("PORTAGE_QUIET")
14854
14855         if "--verbose" in myopts:
14856                 settings["PORTAGE_VERBOSE"] = "1"
14857                 settings.backup_changes("PORTAGE_VERBOSE")
14858
14859         # Set so that configs will be merged regardless of remembered status
14860         if ("--noconfmem" in myopts):
14861                 settings["NOCONFMEM"]="1"
14862                 settings.backup_changes("NOCONFMEM")
14863
14864         # Set various debug markers... They should be merged somehow.
14865         PORTAGE_DEBUG = 0
14866         try:
14867                 PORTAGE_DEBUG = int(settings.get("PORTAGE_DEBUG", str(PORTAGE_DEBUG)))
14868                 if PORTAGE_DEBUG not in (0, 1):
14869                         portage.writemsg("!!! Invalid value: PORTAGE_DEBUG='%i'\n" % \
14870                                 PORTAGE_DEBUG, noiselevel=-1)
14871                         portage.writemsg("!!! PORTAGE_DEBUG must be either 0 or 1\n",
14872                                 noiselevel=-1)
14873                         PORTAGE_DEBUG = 0
14874         except ValueError, e:
14875                 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
14876                 portage.writemsg("!!! Unable to parse integer: PORTAGE_DEBUG='%s'\n" %\
14877                         settings["PORTAGE_DEBUG"], noiselevel=-1)
14878                 del e
14879         if "--debug" in myopts:
14880                 PORTAGE_DEBUG = 1
14881         settings["PORTAGE_DEBUG"] = str(PORTAGE_DEBUG)
14882         settings.backup_changes("PORTAGE_DEBUG")
14883
14884         if settings.get("NOCOLOR") not in ("yes","true"):
14885                 portage.output.havecolor = 1
14886
14887         # The explicit --color < y | n > option overrides the NOCOLOR
14888         # environment variable and stdout auto-detection.
14889         if "--color" in myopts:
14890                 if "y" == myopts["--color"]:
14891                         portage.output.havecolor = 1
14892                         settings["NOCOLOR"] = "false"
14893                 else:
14894                         portage.output.havecolor = 0
14895                         settings["NOCOLOR"] = "true"
14896                 settings.backup_changes("NOCOLOR")
14897         elif not sys.stdout.isatty() and settings.get("NOCOLOR") != "no":
14898                 portage.output.havecolor = 0
14899                 settings["NOCOLOR"] = "true"
14900                 settings.backup_changes("NOCOLOR")
14901
14902 def apply_priorities(settings):
14903         ionice(settings)
14904         nice(settings)
14905
14906 def nice(settings):
14907         try:
14908                 os.nice(int(settings.get("PORTAGE_NICENESS", "0")))
14909         except (OSError, ValueError), e:
14910                 out = portage.output.EOutput()
14911                 out.eerror("Failed to change nice value to '%s'" % \
14912                         settings["PORTAGE_NICENESS"])
14913                 out.eerror("%s\n" % str(e))
14914
14915 def ionice(settings):
14916
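              # PORTAGE_IONICE_COMMAND should reference ${PID}, which is
              # substituted below with emerge's own pid, for example (see
              # make.conf(5)): PORTAGE_IONICE_COMMAND="ionice -c 3 -p \${PID}"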
14917         ionice_cmd = settings.get("PORTAGE_IONICE_COMMAND")
14918         if ionice_cmd:
14919                 ionice_cmd = shlex.split(ionice_cmd)
14920         if not ionice_cmd:
14921                 return
14922
14923         from portage.util import varexpand
14924         variables = {"PID" : str(os.getpid())}
14925         cmd = [varexpand(x, mydict=variables) for x in ionice_cmd]
14926
14927         try:
14928                 rval = portage.process.spawn(cmd, env=os.environ)
14929         except portage.exception.CommandNotFound:
14930                 # The OS kernel probably doesn't support ionice,
14931                 # so return silently.
14932                 return
14933
14934         if rval != os.EX_OK:
14935                 out = portage.output.EOutput()
14936                 out.eerror("PORTAGE_IONICE_COMMAND returned %d" % (rval,))
14937                 out.eerror("See the make.conf(5) man page for PORTAGE_IONICE_COMMAND usage instructions.")
14938
14939 def display_missing_pkg_set(root_config, set_name):
14940
14941         msg = []
14942         msg.append(("emerge: There are no sets to satisfy '%s'. " + \
14943                 "The following sets exist:") % \
14944                 colorize("INFORM", set_name))
14945         msg.append("")
14946
14947         for s in sorted(root_config.sets):
14948                 msg.append("    %s" % s)
14949         msg.append("")
14950
14951         writemsg_level("".join("%s\n" % l for l in msg),
14952                 level=logging.ERROR, noiselevel=-1)
14953
14954 def expand_set_arguments(myfiles, myaction, root_config):
14955         retval = os.EX_OK
14956         setconfig = root_config.setconfig
14957
14958         sets = setconfig.getSets()
14959
14960         # In order to know exactly which atoms/sets should be added to the
14961         # world file, the depgraph performs set expansion later. It will get
14962         # confused about where the atoms came from if it's not allowed to
14963         # expand them itself.
14964         do_not_expand = (None, )
14965         newargs = []
14966         for a in myfiles:
14967                 if a in ("system", "world"):
14968                         newargs.append(SETPREFIX+a)
14969                 else:
14970                         newargs.append(a)
14971         myfiles = newargs
14972         del newargs
14973         newargs = []
14974
14975         # separators for set arguments
14976         ARG_START = "{"
14977         ARG_END = "}"
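              # A set argument may carry per-set options in braces, parsed below
              # as comma-separated key=value pairs; e.g. (hypothetical set name)
              # @somesetname{option=value,flag} updates that set's configuration.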
14978
14979         # WARNING: all operators must be of equal length
14980         IS_OPERATOR = "/@"
14981         DIFF_OPERATOR = "-@"
14982         UNION_OPERATOR = "+@"
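              # e.g. "@world-@system" yields the atoms in world that are not in
              # system; "/@" is intersection and "+@" is union. Chained
              # expressions are evaluated strictly left-to-right further below.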
14983         
14984         for i in range(0, len(myfiles)):
14985                 if myfiles[i].startswith(SETPREFIX):
14986                         start = 0
14987                         end = 0
14988                         x = myfiles[i][len(SETPREFIX):]
14989                         newset = ""
14990                         while x:
14991                                 start = x.find(ARG_START)
14992                                 end = x.find(ARG_END)
14993                                 if start > 0 and start < end:
14994                                         namepart = x[:start]
14995                                         argpart = x[start+1:end]
14996                                 
14997                                         # TODO: implement proper quoting
14998                                         args = argpart.split(",")
14999                                         options = {}
15000                                         for a in args:
15001                                                 if "=" in a:
15002                                                         k, v  = a.split("=", 1)
15003                                                         options[k] = v
15004                                                 else:
15005                                                         options[a] = "True"
15006                                         setconfig.update(namepart, options)
15007                                         newset += (x[:start-len(namepart)]+namepart)
15008                                         x = x[end+len(ARG_END):]
15009                                 else:
15010                                         newset += x
15011                                         x = ""
15012                         myfiles[i] = SETPREFIX+newset
15013                                 
15014         sets = setconfig.getSets()
15015
15016         # display errors that occurred while loading the SetConfig instance
15017         for e in setconfig.errors:
15018                 print colorize("BAD", "Error during set creation: %s" % e)
15019         
15020         # emerge relies on the existence of sets with names "world" and "system"
15021         required_sets = ("world", "system")
15022         missing_sets = []
15023
15024         for s in required_sets:
15025                 if s not in sets:
15026                         missing_sets.append(s)
15027         if missing_sets:
15028                 if len(missing_sets) > 2:
15029                         missing_sets_str = ", ".join('"%s"' % s for s in missing_sets[:-1])
15030                         missing_sets_str += ', and "%s"' % missing_sets[-1]
15031                 elif len(missing_sets) == 2:
15032                         missing_sets_str = '"%s" and "%s"' % tuple(missing_sets)
15033                 else:
15034                         missing_sets_str = '"%s"' % missing_sets[-1]
15035                 msg = ["emerge: incomplete set configuration, " + \
15036                         "missing set(s): %s" % missing_sets_str]
15037                 if sets:
15038                         msg.append("        sets defined: %s" % ", ".join(sets))
15039                 msg.append("        This usually means that '%s'" % \
15040                         (os.path.join(portage.const.GLOBAL_CONFIG_PATH, "sets.conf"),))
15041                 msg.append("        is missing or corrupt.")
15042                 for line in msg:
15043                         writemsg_level(line + "\n", level=logging.ERROR, noiselevel=-1)
15044                 return (None, 1)
15045         unmerge_actions = ("unmerge", "prune", "clean", "depclean")
15046
15047         for a in myfiles:
15048                 if a.startswith(SETPREFIX):
15049                         # support simple set operations (intersection, difference and union)
15050                         # on the commandline. Expressions are evaluated strictly left-to-right
15051                         if IS_OPERATOR in a or DIFF_OPERATOR in a or UNION_OPERATOR in a:
15052                                 expression = a[len(SETPREFIX):]
15053                                 expr_sets = []
15054                                 expr_ops = []
15055                                 while IS_OPERATOR in expression or DIFF_OPERATOR in expression or UNION_OPERATOR in expression:
15056                                         is_pos = expression.rfind(IS_OPERATOR)
15057                                         diff_pos = expression.rfind(DIFF_OPERATOR)
15058                                         union_pos = expression.rfind(UNION_OPERATOR)
15059                                         op_pos = max(is_pos, diff_pos, union_pos)
15060                                         s1 = expression[:op_pos]
15061                                         s2 = expression[op_pos+len(IS_OPERATOR):]
15062                                         op = expression[op_pos:op_pos+len(IS_OPERATOR)]
15063                                         if s2 not in sets:
15064                                                 display_missing_pkg_set(root_config, s2)
15065                                                 return (None, 1)
15066                                         expr_sets.insert(0, s2)
15067                                         expr_ops.insert(0, op)
15068                                         expression = s1
15069                                 if expression not in sets:
15070                                         display_missing_pkg_set(root_config, expression)
15071                                         return (None, 1)
15072                                 expr_sets.insert(0, expression)
15073                                 result = set(setconfig.getSetAtoms(expression))
15074                                 for i in range(0, len(expr_ops)):
15075                                         s2 = setconfig.getSetAtoms(expr_sets[i+1])
15076                                         if expr_ops[i] == IS_OPERATOR:
15077                                                 result.intersection_update(s2)
15078                                         elif expr_ops[i] == DIFF_OPERATOR:
15079                                                 result.difference_update(s2)
15080                                         elif expr_ops[i] == UNION_OPERATOR:
15081                                                 result.update(s2)
15082                                         else:
15083                                                 raise NotImplementedError("unknown set operator %s" % expr_ops[i])
15084                                 newargs.extend(result)
15085                         else:                   
15086                                 s = a[len(SETPREFIX):]
15087                                 if s not in sets:
15088                                         display_missing_pkg_set(root_config, s)
15089                                         return (None, 1)
15090                                 setconfig.active.append(s)
15091                                 try:
15092                                         set_atoms = setconfig.getSetAtoms(s)
15093                                 except portage.exception.PackageSetNotFound, e:
15094                                         writemsg_level(("emerge: the given set '%s' " + \
15095                                                 "contains a non-existent set named '%s'.\n") % \
15096                                                 (s, e), level=logging.ERROR, noiselevel=-1)
15097                                         return (None, 1)
15098                                 if myaction in unmerge_actions and \
15099                                                 not sets[s].supportsOperation("unmerge"):
15100                                         sys.stderr.write("emerge: the given set '%s' does " % s + \
15101                                                 "not support unmerge operations\n")
15102                                         retval = 1
15103                                 elif not set_atoms:
15104                                         print "emerge: '%s' is an empty set" % s
15105                                 elif myaction not in do_not_expand:
15106                                         newargs.extend(set_atoms)
15107                                 else:
15108                                         newargs.append(SETPREFIX+s)
15109                                 for e in sets[s].errors:
15110                                         print e
15111                 else:
15112                         newargs.append(a)
15113         return (newargs, retval)
15114
15115 def repo_name_check(trees):
15116         missing_repo_names = set()
15117         for root, root_trees in trees.iteritems():
15118                 if "porttree" in root_trees:
15119                         portdb = root_trees["porttree"].dbapi
15120                         missing_repo_names.update(portdb.porttrees)
15121                         repos = portdb.getRepositories()
15122                         for r in repos:
15123                                 missing_repo_names.discard(portdb.getRepositoryPath(r))
15124                         if portdb.porttree_root in missing_repo_names and \
15125                                 not os.path.exists(os.path.join(
15126                                 portdb.porttree_root, "profiles")):
15127                                 # This is normal if $PORTDIR happens to be empty,
15128                                 # so don't warn about it.
15129                                 missing_repo_names.remove(portdb.porttree_root)
15130
15131         if missing_repo_names:
15132                 msg = []
15133                 msg.append("WARNING: One or more repositories " + \
15134                         "have missing repo_name entries:")
15135                 msg.append("")
15136                 for p in missing_repo_names:
15137                         msg.append("\t%s/profiles/repo_name" % (p,))
15138                 msg.append("")
15139                 msg.extend(textwrap.wrap("NOTE: Each repo_name entry " + \
15140                         "should be a plain text file containing a unique " + \
15141                         "name for the repository on the first line.", 70))
15142                 writemsg_level("".join("%s\n" % l for l in msg),
15143                         level=logging.WARNING, noiselevel=-1)
15144
15145         return bool(missing_repo_names)
15146
15147 def config_protect_check(trees):
15148         for root, root_trees in trees.iteritems():
15149                 if not root_trees["root_config"].settings.get("CONFIG_PROTECT"):
15150                         msg = "!!! CONFIG_PROTECT is empty"
15151                         if root != "/":
15152                                 msg += " for '%s'" % root
15153                         writemsg_level(msg + "\n", level=logging.WARN, noiselevel=-1)
15154
15155 def ambiguous_package_name(arg, atoms, root_config, spinner, myopts):
15156
15157         if "--quiet" in myopts:
15158                 print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
15159                 print "!!! one of the following fully-qualified ebuild names instead:\n"
15160                 for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
15161                         print "    " + colorize("INFORM", cp)
15162                 return
15163
15164         s = search(root_config, spinner, "--searchdesc" in myopts,
15165                 "--quiet" not in myopts, "--usepkg" in myopts,
15166                 "--usepkgonly" in myopts)
15167         null_cp = portage.dep_getkey(insert_category_into_atom(
15168                 arg, "null"))
15169         cat, atom_pn = portage.catsplit(null_cp)
15170         s.searchkey = atom_pn
15171         for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
15172                 s.addCP(cp)
15173         s.output()
15174         print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
15175         print "!!! one of the above fully-qualified ebuild names instead.\n"
15176
15177 def profile_check(trees, myaction, myopts):
15178         if myaction in ("info", "sync"):
15179                 return os.EX_OK
15180         elif "--version" in myopts or "--help" in myopts:
15181                 return os.EX_OK
15182         for root, root_trees in trees.iteritems():
15183                 if root_trees["root_config"].settings.profiles:
15184                         continue
15185                 # generate some profile related warning messages
15186                 validate_ebuild_environment(trees)
15187                 msg = "If you have just changed your profile configuration, you " + \
15188                         "should revert to the previous configuration. Due to " + \
15189                         "your current profile being invalid, allowed actions are " + \
15190                         "limited to --help, --info, --sync, and --version."
15191                 writemsg_level("".join("!!! %s\n" % l for l in textwrap.wrap(msg, 70)),
15192                         level=logging.ERROR, noiselevel=-1)
15193                 return 1
15194         return os.EX_OK
15195
15196 def emerge_main():
15197         global portage  # NFC why this is necessary now - genone
15198         portage._disable_legacy_globals()
15199         # Disable color until we're sure that it should be enabled (after
15200         # EMERGE_DEFAULT_OPTS has been parsed).
15201         portage.output.havecolor = 0
15202         # This first pass is just for options that need to be known as early as
15203         # possible, such as --config-root.  They will be parsed again later,
15204         # together with EMERGE_DEFAULT_OPTS (which may vary depending on the
15205         # value of --config-root).
15206         myaction, myopts, myfiles = parse_opts(sys.argv[1:], silent=True)
15207         if "--debug" in myopts:
15208                 os.environ["PORTAGE_DEBUG"] = "1"
15209         if "--config-root" in myopts:
15210                 os.environ["PORTAGE_CONFIGROOT"] = myopts["--config-root"]
15211
15212         # Portage needs to ensure a sane umask for the files it creates.
15213         os.umask(022)
15214         settings, trees, mtimedb = load_emerge_config()
15215         portdb = trees[settings["ROOT"]]["porttree"].dbapi
15216         rval = profile_check(trees, myaction, myopts)
15217         if rval != os.EX_OK:
15218                 return rval
15219
15220         if portage._global_updates(trees, mtimedb["updates"]):
15221                 mtimedb.commit()
15222                 # Reload the whole config from scratch.
15223                 settings, trees, mtimedb = load_emerge_config(trees=trees)
15224                 portdb = trees[settings["ROOT"]]["porttree"].dbapi
15225
15226         xterm_titles = "notitles" not in settings.features
15227
15228         tmpcmdline = []
15229         if "--ignore-default-opts" not in myopts:
15230                 tmpcmdline.extend(settings["EMERGE_DEFAULT_OPTS"].split())
15231         tmpcmdline.extend(sys.argv[1:])
15232         myaction, myopts, myfiles = parse_opts(tmpcmdline)
15233
15234         if "--digest" in myopts:
15235                 os.environ["FEATURES"] = os.environ.get("FEATURES","") + " digest"
15236                 # Reload the whole config from scratch so that the portdbapi internal
15237                 # config is updated with new FEATURES.
15238                 settings, trees, mtimedb = load_emerge_config(trees=trees)
15239                 portdb = trees[settings["ROOT"]]["porttree"].dbapi
15240
15241         for myroot in trees:
15242                 mysettings =  trees[myroot]["vartree"].settings
15243                 mysettings.unlock()
15244                 adjust_config(myopts, mysettings)
15245                 if "--pretend" not in myopts:
15246                         mysettings["PORTAGE_COUNTER_HASH"] = \
15247                                 trees[myroot]["vartree"].dbapi._counter_hash()
15248                         mysettings.backup_changes("PORTAGE_COUNTER_HASH")
15249                 mysettings.lock()
15250                 del myroot, mysettings
15251
15252         apply_priorities(settings)
15253
15254         spinner = stdout_spinner()
15255         if "candy" in settings.features:
15256                 spinner.update = spinner.update_scroll
15257
15258         if "--quiet" not in myopts:
15259                 portage.deprecated_profile_check(settings=settings)
15260                 repo_name_check(trees)
15261                 config_protect_check(trees)
15262
15263         eclasses_overridden = {}
15264         for mytrees in trees.itervalues():
15265                 mydb = mytrees["porttree"].dbapi
15266                 # Freeze the portdbapi for performance (memoize all xmatch results).
15267                 mydb.freeze()
15268                 eclasses_overridden.update(mydb.eclassdb._master_eclasses_overridden)
15269         del mytrees, mydb
15270
15271         if eclasses_overridden and \
15272                 settings.get("PORTAGE_ECLASS_WARNING_ENABLE") != "0":
15273                 prefix = bad(" * ")
15274                 if len(eclasses_overridden) == 1:
15275                         writemsg(prefix + "Overlay eclass overrides " + \
15276                                 "eclass from PORTDIR:\n", noiselevel=-1)
15277                 else:
15278                         writemsg(prefix + "Overlay eclasses override " + \
15279                                 "eclasses from PORTDIR:\n", noiselevel=-1)
15280                 writemsg(prefix + "\n", noiselevel=-1)
15281                 for eclass_name in sorted(eclasses_overridden):
15282                         writemsg(prefix + "  '%s/%s.eclass'\n" % \
15283                                 (eclasses_overridden[eclass_name], eclass_name),
15284                                 noiselevel=-1)
15285                 writemsg(prefix + "\n", noiselevel=-1)
15286                 msg = "It is best to avoid overriding eclasses from PORTDIR " + \
15287                         "because it will trigger invalidation of cached ebuild metadata " + \
15288                         "that is distributed with the portage tree. If you must " + \
15289                         "override eclasses from PORTDIR then you are advised to add " + \
15290                         "FEATURES=\"metadata-transfer\" to /etc/make.conf and to run " + \
15291                         "`emerge --regen` after each time that you run `emerge --sync`. " + \
15292                         "Set PORTAGE_ECLASS_WARNING_ENABLE=\"0\" in /etc/make.conf if " + \
15293                         "you would like to disable this warning."
15294                 from textwrap import wrap
15295                 for line in wrap(msg, 72):
15296                         writemsg("%s%s\n" % (prefix, line), noiselevel=-1)
15297
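              # Easter egg: `emerge moo` prints a cow.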
15298         if "moo" in myfiles:
15299                 print """
15300
15301   Larry loves Gentoo (""" + platform.system() + """)
15302
15303  _______________________
15304 < Have you mooed today? >
15305  -----------------------
15306         \   ^__^
15307          \  (oo)\_______
15308             (__)\       )\/\ 
15309                 ||----w |
15310                 ||     ||
15311
15312 """
15313
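              # Warn when an argument looks like a path to an ebuild or tbz2 file,
              # since merging by path is unreliable.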
15314         for x in myfiles:
15315                 ext = os.path.splitext(x)[1]
15316                 if (ext == ".ebuild" or ext == ".tbz2") and os.path.exists(os.path.abspath(x)):
15317                         print colorize("BAD", "\n*** emerging by path is broken and may not always work!!!\n")
15318                         break
15319
15320         root_config = trees[settings["ROOT"]]["root_config"]
15321         if myaction == "list-sets":
15322                 sys.stdout.write("".join("%s\n" % s for s in sorted(root_config.sets)))
15323                 sys.stdout.flush()
15324                 return os.EX_OK
15325
15326         # only expand sets for actions taking package arguments
15327         oldargs = myfiles[:]
15328         if myaction in ("clean", "config", "depclean", "info", "prune", "unmerge", None):
15329                 myfiles, retval = expand_set_arguments(myfiles, myaction, root_config)
15330                 if retval != os.EX_OK:
15331                         return retval
15332
15333                 # Handle empty sets specially; otherwise emerge would respond
15334                 # with the help message as it does for an empty argument list.
15335                 if oldargs and not myfiles:
15336                         print "emerge: no targets left after set expansion"
15337                         return 0
15338
15339         if ("--tree" in myopts) and ("--columns" in myopts):
15340                 print "emerge: can't specify both \"--tree\" and \"--columns\"."
15341                 return 1
15342
15343         if ("--quiet" in myopts):
15344                 spinner.update = spinner.update_quiet
15345                 portage.util.noiselimit = -1
15346
15347         # Always create packages if FEATURES=buildpkg
15348         # Imply --buildpkg if --buildpkgonly
15349         if ("buildpkg" in settings.features) or ("--buildpkgonly" in myopts):
15350                 if "--buildpkg" not in myopts:
15351                         myopts["--buildpkg"] = True
15352
15353         # Also allow -S to invoke search action (-sS)
15354         if ("--searchdesc" in myopts):
15355                 if myaction and myaction != "search":
15356                         myfiles.append(myaction)
15357                 if "--search" not in myopts:
15358                         myopts["--search"] = True
15359                 myaction = "search"
15360
15361         # Always try to fetch binary packages if FEATURES=getbinpkg
15362         if ("getbinpkg" in settings.features):
15363                 myopts["--getbinpkg"] = True
15364
15365         if "--buildpkgonly" in myopts:
15366                 # --buildpkgonly will not merge anything, so
15367                 # it cancels all binary package options.
15368                 for opt in ("--getbinpkg", "--getbinpkgonly",
15369                         "--usepkg", "--usepkgonly"):
15370                         myopts.pop(opt, None)
15371
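              # --fetch-all-uri implies --fetchonly.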
15372         if "--fetch-all-uri" in myopts:
15373                 myopts["--fetchonly"] = True
15374
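              # --skipfirst only makes sense when resuming, so imply --resume.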
15375         if "--skipfirst" in myopts and "--resume" not in myopts:
15376                 myopts["--resume"] = True
15377
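              # Propagate implications among the binary package options:
              # --getbinpkgonly implies --usepkgonly and --getbinpkg, and
              # --getbinpkg implies --usepkg.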
15378         if ("--getbinpkgonly" in myopts) and not ("--usepkgonly" in myopts):
15379                 myopts["--usepkgonly"] = True
15380
15381         if ("--getbinpkgonly" in myopts) and not ("--getbinpkg" in myopts):
15382                 myopts["--getbinpkg"] = True
15383
15384         if ("--getbinpkg" in myopts) and not ("--usepkg" in myopts):
15385                 myopts["--usepkg"] = True
15386
15387         # Also allow -K to apply --usepkg/-k
15388         if ("--usepkgonly" in myopts) and not ("--usepkg" in myopts):
15389                 myopts["--usepkg"] = True
15390
15391         # Allow -p to remove --ask
15392         if ("--pretend" in myopts) and ("--ask" in myopts):
15393                 print ">>> --pretend disables --ask... removing --ask from options."
15394                 del myopts["--ask"]
15395
15396         # forbid --ask when not in a terminal
15397         # note: this breaks `emerge --ask | tee logfile`, but that doesn't work anyway.
15398         if ("--ask" in myopts) and (not sys.stdin.isatty()):
15399                 portage.writemsg("!!! \"--ask\" should only be used in a terminal. Exiting.\n",
15400                         noiselevel=-1)
15401                 return 1
15402
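              # PORTAGE_DEBUG=1 quiets the spinner, enables debug mode and, with
              # FEATURES=python-trace, turns on full Python tracing.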
15403         if settings.get("PORTAGE_DEBUG", "") == "1":
15404                 spinner.update = spinner.update_quiet
15405                 portage.debug=1
15406                 if "python-trace" in settings.features:
15407                         import portage.debug
15408                         portage.debug.set_trace(True)
15409
15410         if not ("--quiet" in myopts):
15411                 if not sys.stdout.isatty() or ("--nospinner" in myopts):
15412                         spinner.update = spinner.update_basic
15413
15414         if "--version" in myopts:
15415                 print getportageversion(settings["PORTDIR"], settings["ROOT"],
15416                         settings.profile_path, settings["CHOST"],
15417                         trees[settings["ROOT"]]["vartree"].dbapi)
15418                 return 0
15419         elif "--help" in myopts:
15420                 _emerge.help.help(myaction, myopts, portage.output.havecolor)
15421                 return 0
15422
15423         if "--debug" in myopts:
15424                 print "myaction", myaction
15425                 print "myopts", myopts
15426
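              # With no action, no targets and no --resume there is nothing to do,
              # so show the help text and exit with an error.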
15427         if not myaction and not myfiles and "--resume" not in myopts:
15428                 _emerge.help.help(myaction, myopts, portage.output.havecolor)
15429                 return 1
15430
15431         pretend = "--pretend" in myopts
15432         fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
15433         buildpkgonly = "--buildpkgonly" in myopts
15434
15435         # Check that the current user has the privileges required by the requested action.
15436         if portage.secpass < 2:
15437                 # We've already allowed "--version" and "--help" above.
15438                 if "--pretend" not in myopts and myaction not in ("search","info"):
15439                         need_superuser = not \
15440                                 (fetchonly or \
15441                                 (buildpkgonly and secpass >= 1) or \
15442                                 myaction in ("metadata", "regen") or \
15443                                 (myaction == "sync" and os.access(settings["PORTDIR"], os.W_OK)))
15444                         if portage.secpass < 1 or \
15445                                 need_superuser:
15446                                 if need_superuser:
15447                                         access_desc = "superuser"
15448                                 else:
15449                                         access_desc = "portage group"
15450                                 # Always show portage_group_warning() when only portage group
15451                                 # access is required but the user is not in the portage group.
15452                                 from portage.data import portage_group_warning
15453                                 if "--ask" in myopts:
15454                                         myopts["--pretend"] = True
15455                                         del myopts["--ask"]
15456                                         print ("%s access is required... " + \
15457                                                 "adding --pretend to options.\n") % access_desc
15458                                         if portage.secpass < 1 and not need_superuser:
15459                                                 portage_group_warning()
15460                                 else:
15461                                         sys.stderr.write(("emerge: %s access is " + \
15462                                                 "required.\n\n") % access_desc)
15463                                         if portage.secpass < 1 and not need_superuser:
15464                                                 portage_group_warning()
15465                                         return 1
15466
15467         disable_emergelog = False
15468         for x in ("--pretend", "--fetchonly", "--fetch-all-uri"):
15469                 if x in myopts:
15470                         disable_emergelog = True
15471                         break
15472         if myaction in ("search", "info"):
15473                 disable_emergelog = True
15474         if disable_emergelog:
15475                 """ Disable emergelog for everything except build or unmerge
15476                 operations.  This helps minimize parallel emerge.log entries that can
15477                 confuse log parsers.  We especially want it disabled during
15478                 parallel-fetch, which uses --resume --fetchonly."""
15479                 global emergelog
15480                 def emergelog(*pargs, **kargs):
15481                         pass
15482
15483         if not "--pretend" in myopts:
15484                 emergelog(xterm_titles, "Started emerge on: "+\
15485                         time.strftime("%b %d, %Y %H:%M:%S", time.localtime()))
15486                 myelogstr=""
15487                 if myopts:
15488                         myelogstr=" ".join(myopts)
15489                 if myaction:
15490                         myelogstr+=" "+myaction
15491                 if myfiles:
15492                         myelogstr += " " + " ".join(oldargs)
15493                 emergelog(xterm_titles, " *** emerge " + myelogstr)
15494         del oldargs
15495
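              # Terminate cleanly on SIGINT/SIGTERM, exiting with 100 plus the signal number.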
15496         def emergeexitsig(signum, frame):
15497                 signal.signal(signal.SIGINT, signal.SIG_IGN)
15498                 signal.signal(signal.SIGTERM, signal.SIG_IGN)
15499                 portage.util.writemsg("\n\nExiting on signal %(signal)s\n" % {"signal":signum})
15500                 sys.exit(100+signum)
15501         signal.signal(signal.SIGINT, emergeexitsig)
15502         signal.signal(signal.SIGTERM, emergeexitsig)
15503
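              # Write a final log entry and reset the xterm title when emerge exits
              # (registered with portage.atexit_register below).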
15504         def emergeexit():
15505                 """This gets our final log message in before we quit."""
15506                 if "--pretend" not in myopts:
15507                         emergelog(xterm_titles, " *** terminating.")
15508                 if "notitles" not in settings.features:
15509                         xtermTitleReset()
15510         portage.atexit_register(emergeexit)
15511
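              # These actions are incompatible with --pretend.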
15512         if myaction in ("config", "metadata", "regen", "sync"):
15513                 if "--pretend" in myopts:
15514                         sys.stderr.write(("emerge: The '%s' action does " + \
15515                                 "not support '--pretend'.\n") % myaction)
15516                         return 1
15517
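              # Dispatch to the requested action.  Anything not handled explicitly
              # below falls through to the default build/merge handling at the end.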
15518         if "sync" == myaction:
15519                 return action_sync(settings, trees, mtimedb, myopts, myaction)
15520         elif "metadata" == myaction:
15521                 action_metadata(settings, portdb, myopts)
15522         elif myaction=="regen":
15523                 validate_ebuild_environment(trees)
15524                 return action_regen(settings, portdb, myopts.get("--jobs"),
15525                         myopts.get("--load-average"))
15526         # CONFIG action
15527         elif "config"==myaction:
15528                 validate_ebuild_environment(trees)
15529                 action_config(settings, trees, myopts, myfiles)
15530
15531         # SEARCH action
15532         elif "search"==myaction:
15533                 validate_ebuild_environment(trees)
15534                 action_search(trees[settings["ROOT"]]["root_config"],
15535                         myopts, myfiles, spinner)
15536         elif myaction in ("clean", "unmerge") or \
15537                 (myaction == "prune" and "--nodeps" in myopts):
15538                 validate_ebuild_environment(trees)
15539
15540                 # Ensure atoms are valid before calling unmerge().
15541                 # For backward compat, leading '=' is not required.
15542                 for x in myfiles:
15543                         if is_valid_package_atom(x) or \
15544                                 is_valid_package_atom("=" + x):
15545                                 continue
15546                         msg = []
15547                         msg.append("'%s' is not a valid package atom." % (x,))
15548                         msg.append("Please check ebuild(5) for full details.")
15549                         writemsg_level("".join("!!! %s\n" % line for line in msg),
15550                                 level=logging.ERROR, noiselevel=-1)
15551                         return 1
15552
15553                 # When given a list of atoms, unmerge
15554                 # them in the order given.
15555                 ordered = myaction == "unmerge"
15556                 if 1 == unmerge(root_config, myopts, myaction, myfiles,
15557                         mtimedb["ldpath"], ordered=ordered):
15558                         if not (buildpkgonly or fetchonly or pretend):
15559                                 post_emerge(root_config, myopts, mtimedb, os.EX_OK)
15560
15561         elif myaction in ("depclean", "info", "prune"):
15562
15563                 # Ensure atoms are valid before passing them to the requested action.
15564                 vardb = trees[settings["ROOT"]]["vartree"].dbapi
15565                 valid_atoms = []
15566                 for x in myfiles:
15567                         if is_valid_package_atom(x):
15568                                 try:
15569                                         valid_atoms.append(
15570                                                 portage.dep_expand(x, mydb=vardb, settings=settings))
15571                                 except portage.exception.AmbiguousPackageName, e:
15572                                         msg = "The short ebuild name \"" + x + \
15573                                                 "\" is ambiguous.  Please specify " + \
15574                                                 "one of the following " + \
15575                                                 "fully-qualified ebuild names instead:"
15576                                         for line in textwrap.wrap(msg, 70):
15577                                                 writemsg_level("!!! %s\n" % (line,),
15578                                                         level=logging.ERROR, noiselevel=-1)
15579                                         for i in e[0]:
15580                                                 writemsg_level("    %s\n" % colorize("INFORM", i),
15581                                                         level=logging.ERROR, noiselevel=-1)
15582                                         writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
15583                                         return 1
15584                                 continue
15585                         msg = []
15586                         msg.append("'%s' is not a valid package atom." % (x,))
15587                         msg.append("Please check ebuild(5) for full details.")
15588                         writemsg_level("".join("!!! %s\n" % line for line in msg),
15589                                 level=logging.ERROR, noiselevel=-1)
15590                         return 1
15591
15592                 if myaction == "info":
15593                         return action_info(settings, trees, myopts, valid_atoms)
15594
15595                 validate_ebuild_environment(trees)
15596                 action_depclean(settings, trees, mtimedb["ldpath"],
15597                         myopts, myaction, valid_atoms, spinner)
15598                 if not (buildpkgonly or fetchonly or pretend):
15599                         post_emerge(root_config, myopts, mtimedb, os.EX_OK)
15600         # "update", "system", or just process files:
15601         else:
15602                 validate_ebuild_environment(trees)
15603                 if "--pretend" not in myopts:
15604                         display_news_notification(root_config, myopts)
15605                 retval = action_build(settings, trees, mtimedb,
15606                         myopts, myaction, myfiles, spinner)
15607                 root_config = trees[settings["ROOT"]]["root_config"]
15608                 post_emerge(root_config, myopts, mtimedb, retval)
15609
15610                 return retval