Split out an action_uninstall() function to handle argument validation for
[portage.git] / pym/_emerge/__init__.py
1 #!/usr/bin/python -O
2 # Copyright 1999-2006 Gentoo Foundation
3 # Distributed under the terms of the GNU General Public License v2
4 # $Id$
5
6 import array
7 import codecs
8 from collections import deque
9 import fcntl
10 import formatter
11 import logging
12 import pwd
13 import select
14 import shlex
15 import shutil
16 import signal
17 import sys
18 import textwrap
19 import urlparse
20 import weakref
21 import gc
22 import os, stat
23 import platform
24
25 try:
26         import portage
27 except ImportError:
28         from os import path as osp
29         sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
30         import portage
31
32 from portage import digraph
33 from portage.const import NEWS_LIB_PATH
34
35 import _emerge.help
36 import portage.xpak, commands, errno, re, socket, time
37 from portage.output import blue, bold, colorize, darkblue, darkgreen, darkred, green, \
38         nc_len, red, teal, turquoise, xtermTitle, \
39         xtermTitleReset, yellow
40 from portage.output import create_color_func
41 good = create_color_func("GOOD")
42 bad = create_color_func("BAD")
43 # white looks bad on terminals with white background
44 from portage.output import bold as white
45
46 import portage.elog
47 import portage.dep
48 portage.dep._dep_check_strict = True
49 import portage.util
50 import portage.locks
51 import portage.exception
52 from portage.data import secpass
53 from portage.elog.messages import eerror
54 from portage.util import normalize_path as normpath
55 from portage.util import cmp_sort_key, writemsg, writemsg_level
56 from portage.sets import load_default_config, SETPREFIX
57 from portage.sets.base import InternalPackageSet
58
59 from itertools import chain, izip
60
61 try:
62         import cPickle as pickle
63 except ImportError:
64         import pickle
65
66 try:
67         from cStringIO import StringIO
68 except ImportError:
69         from StringIO import StringIO
70
71 class stdout_spinner(object):
72         scroll_msgs = [
73                 "Gentoo Rocks ("+platform.system()+")",
74                 "Thank you for using Gentoo. :)",
75                 "Are you actually trying to read this?",
76                 "How many times have you stared at this?",
77                 "We are generating the cache right now",
78                 "You are paying too much attention.",
79                 "A theory is better than its explanation.",
80                 "Phasers locked on target, Captain.",
81                 "Thrashing is just virtual crashing.",
82                 "To be is to program.",
83                 "Real Users hate Real Programmers.",
84                 "When all else fails, read the instructions.",
85                 "Functionality breeds Contempt.",
86                 "The future lies ahead.",
87                 "3.1415926535897932384626433832795028841971694",
88                 "Sometimes insanity is the only alternative.",
89                 "Inaccuracy saves a world of explanation.",
90         ]
91
92         twirl_sequence = "/-\\|/-\\|/-\\|/-\\|\\-/|\\-/|\\-/|\\-/|"
93
94         def __init__(self):
95                 self.spinpos = 0
96                 self.update = self.update_twirl
97                 self.scroll_sequence = self.scroll_msgs[
98                         int(time.time() * 100) % len(self.scroll_msgs)]
99                 self.last_update = 0
100                 self.min_display_latency = 0.05
101
102         def _return_early(self):
103                 """
104                 Flushing output to the tty too frequently wastes cpu time. Therefore,
105                 each update* method should return without doing any output when this
106                 method returns True.
107                 """
108                 cur_time = time.time()
109                 if cur_time - self.last_update < self.min_display_latency:
110                         return True
111                 self.last_update = cur_time
112                 return False
113
114         def update_basic(self):
115                 self.spinpos = (self.spinpos + 1) % 500
116                 if self._return_early():
117                         return
118                 if (self.spinpos % 100) == 0:
119                         if self.spinpos == 0:
120                                 sys.stdout.write(". ")
121                         else:
122                                 sys.stdout.write(".")
123                 sys.stdout.flush()
124
125         def update_scroll(self):
126                 if self._return_early():
127                         return
128                 if self.spinpos >= len(self.scroll_sequence):
129                         sys.stdout.write(darkgreen(" \b\b\b" + self.scroll_sequence[
130                                 len(self.scroll_sequence) - 1 - (self.spinpos % len(self.scroll_sequence))]))
131                 else:
132                         sys.stdout.write(green("\b " + self.scroll_sequence[self.spinpos]))
133                 sys.stdout.flush()
134                 self.spinpos = (self.spinpos + 1) % (2 * len(self.scroll_sequence))
135
136         def update_twirl(self):
137                 self.spinpos = (self.spinpos + 1) % len(self.twirl_sequence)
138                 if self._return_early():
139                         return
140                 sys.stdout.write("\b\b " + self.twirl_sequence[self.spinpos])
141                 sys.stdout.flush()
142
143         def update_quiet(self):
144                 return
145
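
# Illustrative usage sketch (not part of the original module): a stdout_spinner
# advances its animation each time update() is called, and callers can swap the
# update method to change the style, e.g.:
#
#     >>> spinner = stdout_spinner()
#     >>> for _ in range(10):
#     ...     spinner.update()                     # defaults to update_twirl()
#     >>> spinner.update = spinner.update_quiet    # e.g. for --quiet output
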
146 def userquery(prompt, responses=None, colours=None):
147         """Displays a prompt and a set of responses, then waits for a response
148         which is checked against the responses and the first to match is
149         returned.  An empty response will match the first value in responses.  The
150         input buffer is *not* cleared prior to the prompt!
151
152         prompt: a String.
153         responses: a List of Strings.
154         colours: a List of Functions taking and returning a String, used to
155         process the responses for display. Typically these will be functions
156         like red() but could be e.g. lambda x: "DisplayString".
157         If responses is omitted, defaults to ["Yes", "No"], [green, red].
158         If only colours is omitted, defaults to [bold, ...].
159
160         Returns a member of the List responses. (If called without optional
161         arguments, returns "Yes" or "No".)
162         KeyboardInterrupt is converted to SystemExit to avoid tracebacks being
163         printed."""
164         if responses is None:
165                 responses = ["Yes", "No"]
166                 colours = [
167                         create_color_func("PROMPT_CHOICE_DEFAULT"),
168                         create_color_func("PROMPT_CHOICE_OTHER")
169                 ]
170         elif colours is None:
171                 colours=[bold]
172         colours=(colours*len(responses))[:len(responses)]
173         print bold(prompt),
174         try:
175                 while True:
176                         response=raw_input("["+"/".join([colours[i](responses[i]) for i in range(len(responses))])+"] ")
177                         for key in responses:
178                                 # An empty response will match the first value in responses.
179                                 if response.upper()==key[:len(response)].upper():
180                                         return key
181                         print "Sorry, response '%s' not understood." % response,
182         except (EOFError, KeyboardInterrupt):
183                 print "Interrupted."
184                 sys.exit(1)
185
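# Illustrative sketch (assumes an interactive tty and the parsed option mapping
# "myopts" used elsewhere in this module; not exercised here): with no optional
# arguments userquery() returns either "Yes" or "No", and an empty response
# selects the first entry ("Yes").
#
#     >>> if "--ask" in myopts:
#     ...     if userquery("Would you like to merge these packages?") == "No":
#     ...         sys.exit(1)
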
186 actions = frozenset([
187 "clean", "config", "depclean",
188 "info", "list-sets", "metadata",
189 "prune", "regen",  "search",
190 "sync",  "unmerge", "version",
191 ])
192 options=[
193 "--ask",          "--alphabetical",
194 "--buildpkg",     "--buildpkgonly",
195 "--changelog",    "--columns",
196 "--complete-graph",
197 "--debug",        "--deep",
198 "--digest",
199 "--emptytree",
200 "--fetchonly",    "--fetch-all-uri",
201 "--getbinpkg",    "--getbinpkgonly",
202 "--help",         "--ignore-default-opts",
203 "--keep-going",
204 "--noconfmem",
205 "--newuse",
206 "--nodeps",       "--noreplace",
207 "--nospinner",    "--oneshot",
208 "--onlydeps",     "--pretend",
209 "--quiet",        "--resume",
210 "--searchdesc",   "--selective",
211 "--skipfirst",
212 "--tree",
213 "--update",
214 "--usepkg",       "--usepkgonly",
215 "--verbose",
216 ]
217
218 shortmapping={
219 "1":"--oneshot",
220 "a":"--ask",
221 "b":"--buildpkg",  "B":"--buildpkgonly",
222 "c":"--clean",     "C":"--unmerge",
223 "d":"--debug",     "D":"--deep",
224 "e":"--emptytree",
225 "f":"--fetchonly", "F":"--fetch-all-uri",
226 "g":"--getbinpkg", "G":"--getbinpkgonly",
227 "h":"--help",
228 "k":"--usepkg",    "K":"--usepkgonly",
229 "l":"--changelog",
230 "n":"--noreplace", "N":"--newuse",
231 "o":"--onlydeps",  "O":"--nodeps",
232 "p":"--pretend",   "P":"--prune",
233 "q":"--quiet",
234 "s":"--search",    "S":"--searchdesc",
235 "t":"--tree",
236 "u":"--update",
237 "v":"--verbose",   "V":"--version"
238 }
239
240 def emergelog(xterm_titles, mystr, short_msg=None):
241         if xterm_titles and short_msg:
242                 if "HOSTNAME" in os.environ:
243                         short_msg = os.environ["HOSTNAME"]+": "+short_msg
244                 xtermTitle(short_msg)
245         try:
246                 file_path = "/var/log/emerge.log"
247                 mylogfile = open(file_path, "a")
248                 portage.util.apply_secpass_permissions(file_path,
249                         uid=portage.portage_uid, gid=portage.portage_gid,
250                         mode=0660)
251                 mylock = None
252                 try:
253                         mylock = portage.locks.lockfile(mylogfile)
254                         # seek because we may have gotten held up by the lock.
255                         # if so, we may not be positioned at the end of the file.
256                         mylogfile.seek(0, 2)
257                         mylogfile.write(str(time.time())[:10]+": "+mystr+"\n")
258                         mylogfile.flush()
259                 finally:
260                         if mylock:
261                                 portage.locks.unlockfile(mylock)
262                         mylogfile.close()
263         except (IOError,OSError,portage.exception.PortageException), e:
264                 if secpass >= 1:
265                         print >> sys.stderr, "emergelog():",e
266
267 def countdown(secs=5, doing="Starting"):
268         if secs:
269                 print ">>> Waiting",secs,"seconds before starting..."
270                 print ">>> (Control-C to abort)...\n"+doing+" in: ",
271                 ticks=range(secs)
272                 ticks.reverse()
273                 for sec in ticks:
274                         sys.stdout.write(colorize("UNMERGE_WARN", str(sec+1)+" "))
275                         sys.stdout.flush()
276                         time.sleep(1)
277                 print
278
279 # formats a size given in bytes nicely
280 def format_size(mysize):
281         if isinstance(mysize, basestring):
282                 return mysize
283         if 0 != mysize % 1024:
284                 # Always round up to the next kB so that it doesn't show 0 kB when
285                 # some small file still needs to be fetched.
286                 mysize += 1024 - mysize % 1024
287         mystr=str(mysize/1024)
288         mycount=len(mystr)
289         while (mycount > 3):
290                 mycount-=3
291                 mystr=mystr[:mycount]+","+mystr[mycount:]
292         return mystr+" kB"
293
294
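# Worked example (illustrative): format_size() rounds up to whole kB and
# comma-groups the integer part, so:
#
#     >>> format_size(1)            # rounds up to the next kB
#     '1 kB'
#     >>> format_size(2048)
#     '2 kB'
#     >>> format_size(123456789)
#     '120,564 kB'
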
295 def getgccversion(chost):
296         """
297         @rtype: C{str}
298         @return: the current in-use gcc version
299         """
300
301         gcc_ver_command = 'gcc -dumpversion'
302         gcc_ver_prefix = 'gcc-'
303
304         gcc_not_found_error = red(
305         "!!! No gcc found. You probably need to 'source /etc/profile'\n" +
306         "!!! to update the environment of this terminal and possibly\n" +
307         "!!! other terminals also.\n"
308         )
309
310         mystatus, myoutput = commands.getstatusoutput("gcc-config -c")
311         if mystatus == os.EX_OK and myoutput.startswith(chost + "-"):
312                 return myoutput.replace(chost + "-", gcc_ver_prefix, 1)
313
314         mystatus, myoutput = commands.getstatusoutput(
315                 chost + "-" + gcc_ver_command)
316         if mystatus == os.EX_OK:
317                 return gcc_ver_prefix + myoutput
318
319         mystatus, myoutput = commands.getstatusoutput(gcc_ver_command)
320         if mystatus == os.EX_OK:
321                 return gcc_ver_prefix + myoutput
322
323         portage.writemsg(gcc_not_found_error, noiselevel=-1)
324         return "[unavailable]"
325
326 def getportageversion(portdir, target_root, profile, chost, vardb):
327         profilever = "unavailable"
328         if profile:
329                 realpath = os.path.realpath(profile)
330                 basepath   = os.path.realpath(os.path.join(portdir, "profiles"))
331                 if realpath.startswith(basepath):
332                         profilever = realpath[1 + len(basepath):]
333                 else:
334                         try:
335                                 profilever = "!" + os.readlink(profile)
336                         except (OSError):
337                                 pass
338                 del realpath, basepath
339
340         libcver=[]
341         libclist  = vardb.match("virtual/libc")
342         libclist += vardb.match("virtual/glibc")
343         libclist  = portage.util.unique_array(libclist)
344         for x in libclist:
345                 xs=portage.catpkgsplit(x)
346                 if libcver:
347                         libcver+=","+"-".join(xs[1:])
348                 else:
349                         libcver="-".join(xs[1:])
350         if libcver==[]:
351                 libcver="unavailable"
352
353         gccver = getgccversion(chost)
354         unameout=platform.release()+" "+platform.machine()
355
356         return "Portage " + portage.VERSION +" ("+profilever+", "+gccver+", "+libcver+", "+unameout+")"
357
358 def create_depgraph_params(myopts, myaction):
359         #configure emerge engine parameters
360         #
361         # self:      include _this_ package regardless of if it is merged.
362         # selective: exclude the package if it is merged
363         # recurse:   go into the dependencies
364         # deep:      go into the dependencies of already merged packages
365         # empty:     pretend nothing is merged
366         # complete:  completely account for all known dependencies
367         # remove:    build graph for use in removing packages
368         myparams = set(["recurse"])
369
370         if myaction == "remove":
371                 myparams.add("remove")
372                 myparams.add("complete")
373                 return myparams
374
375         if "--update" in myopts or \
376                 "--newuse" in myopts or \
377                 "--reinstall" in myopts or \
378                 "--noreplace" in myopts:
379                 myparams.add("selective")
380         if "--emptytree" in myopts:
381                 myparams.add("empty")
382                 myparams.discard("selective")
383         if "--nodeps" in myopts:
384                 myparams.discard("recurse")
385         if "--deep" in myopts:
386                 myparams.add("deep")
387         if "--complete-graph" in myopts:
388                 myparams.add("complete")
389         return myparams
390
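# Illustrative sketch of the resulting parameter sets (the option dict below is
# hypothetical; the function only tests key membership):
#
#     >>> sorted(create_depgraph_params({"--update": True, "--deep": True}, None))
#     ['deep', 'recurse', 'selective']
#     >>> sorted(create_depgraph_params({}, "remove"))
#     ['complete', 'recurse', 'remove']
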
391 # search functionality
392 class search(object):
393
394         #
395         # class constants
396         #
397         VERSION_SHORT=1
398         VERSION_RELEASE=2
399
400         #
401         # public interface
402         #
403         def __init__(self, root_config, spinner, searchdesc,
404                 verbose, usepkg, usepkgonly):
405                 """Searches the available and installed packages for the supplied search key.
406                 The list of available and installed packages is created at object instantiation.
407                 This makes successive searches faster."""
408                 self.settings = root_config.settings
409                 self.vartree = root_config.trees["vartree"]
410                 self.spinner = spinner
411                 self.verbose = verbose
412                 self.searchdesc = searchdesc
413                 self.root_config = root_config
414                 self.setconfig = root_config.setconfig
415                 self.matches = {"pkg" : []}
416                 self.mlen = 0
417
418                 def fake_portdb():
419                         pass
420                 self.portdb = fake_portdb
421                 for attrib in ("aux_get", "cp_all",
422                         "xmatch", "findname", "getFetchMap"):
423                         setattr(fake_portdb, attrib, getattr(self, "_"+attrib))
424
425                 self._dbs = []
426
427                 portdb = root_config.trees["porttree"].dbapi
428                 bindb = root_config.trees["bintree"].dbapi
429                 vardb = root_config.trees["vartree"].dbapi
430
431                 if not usepkgonly and portdb._have_root_eclass_dir:
432                         self._dbs.append(portdb)
433
434                 if (usepkg or usepkgonly) and bindb.cp_all():
435                         self._dbs.append(bindb)
436
437                 self._dbs.append(vardb)
438                 self._portdb = portdb
439
440         def _cp_all(self):
441                 cp_all = set()
442                 for db in self._dbs:
443                         cp_all.update(db.cp_all())
444                 return list(sorted(cp_all))
445
446         def _aux_get(self, *args, **kwargs):
447                 for db in self._dbs:
448                         try:
449                                 return db.aux_get(*args, **kwargs)
450                         except KeyError:
451                                 pass
452                 raise
453
454         def _findname(self, *args, **kwargs):
455                 for db in self._dbs:
456                         if db is not self._portdb:
457                                 # We don't want findname to return anything
458                                 # unless it's an ebuild in a portage tree.
459                                 # Otherwise, it's already built and we don't
460                                 # care about it.
461                                 continue
462                         func = getattr(db, "findname", None)
463                         if func:
464                                 value = func(*args, **kwargs)
465                                 if value:
466                                         return value
467                 return None
468
469         def _getFetchMap(self, *args, **kwargs):
470                 for db in self._dbs:
471                         func = getattr(db, "getFetchMap", None)
472                         if func:
473                                 value = func(*args, **kwargs)
474                                 if value:
475                                         return value
476                 return {}
477
478         def _visible(self, db, cpv, metadata):
479                 installed = db is self.vartree.dbapi
480                 built = installed or db is not self._portdb
481                 pkg_type = "ebuild"
482                 if installed:
483                         pkg_type = "installed"
484                 elif built:
485                         pkg_type = "binary"
486                 return visible(self.settings,
487                         Package(type_name=pkg_type, root_config=self.root_config,
488                         cpv=cpv, built=built, installed=installed, metadata=metadata))
489
490         def _xmatch(self, level, atom):
491                 """
492                 This method does not expand old-style virtuals because it
493                 is restricted to returning matches for a single ${CATEGORY}/${PN}
494                 and old-style virtual matches are unreliable for that when querying
495                 multiple package databases. If necessary, old-style virtual expansion
496                 can be performed on atoms prior to calling this method.
497                 """
498                 cp = portage.dep_getkey(atom)
499                 if level == "match-all":
500                         matches = set()
501                         for db in self._dbs:
502                                 if hasattr(db, "xmatch"):
503                                         matches.update(db.xmatch(level, atom))
504                                 else:
505                                         matches.update(db.match(atom))
506                         result = list(x for x in matches if portage.cpv_getkey(x) == cp)
507                         db._cpv_sort_ascending(result)
508                 elif level == "match-visible":
509                         matches = set()
510                         for db in self._dbs:
511                                 if hasattr(db, "xmatch"):
512                                         matches.update(db.xmatch(level, atom))
513                                 else:
514                                         db_keys = list(db._aux_cache_keys)
515                                         for cpv in db.match(atom):
516                                                 metadata = izip(db_keys,
517                                                         db.aux_get(cpv, db_keys))
518                                                 if not self._visible(db, cpv, metadata):
519                                                         continue
520                                                 matches.add(cpv)
521                         result = list(x for x in matches if portage.cpv_getkey(x) == cp)
522                         db._cpv_sort_ascending(result)
523                 elif level == "bestmatch-visible":
524                         result = None
525                         for db in self._dbs:
526                                 if hasattr(db, "xmatch"):
527                                         cpv = db.xmatch("bestmatch-visible", atom)
528                                         if not cpv or portage.cpv_getkey(cpv) != cp:
529                                                 continue
530                                         if not result or cpv == portage.best([cpv, result]):
531                                                 result = cpv
532                                 else:
533                                         db_keys = Package.metadata_keys
534                                         # break out of this loop with highest visible
535                                         # match, checked in descending order
536                                         for cpv in reversed(db.match(atom)):
537                                                 if portage.cpv_getkey(cpv) != cp:
538                                                         continue
539                                                 metadata = izip(db_keys,
540                                                         db.aux_get(cpv, db_keys))
541                                                 if not self._visible(db, cpv, metadata):
542                                                         continue
543                                                 if not result or cpv == portage.best([cpv, result]):
544                                                         result = cpv
545                                                 break
546                 else:
547                         raise NotImplementedError(level)
548                 return result
549
550         def execute(self,searchkey):
551                 """Performs the search for the supplied search key"""
552                 match_category = 0
553                 self.searchkey=searchkey
554                 self.packagematches = []
555                 if self.searchdesc:
556                         self.searchdesc=1
557                         self.matches = {"pkg":[], "desc":[], "set":[]}
558                 else:
559                         self.searchdesc=0
560                         self.matches = {"pkg":[], "set":[]}
561                 print "Searching...   ",
562
563                 regexsearch = False
564                 if self.searchkey.startswith('%'):
565                         regexsearch = True
566                         self.searchkey = self.searchkey[1:]
567                 if self.searchkey.startswith('@'):
568                         match_category = 1
569                         self.searchkey = self.searchkey[1:]
570                 if regexsearch:
571                         self.searchre=re.compile(self.searchkey,re.I)
572                 else:
573                         self.searchre=re.compile(re.escape(self.searchkey), re.I)
574                 for package in self.portdb.cp_all():
575                         self.spinner.update()
576
577                         if match_category:
578                                 match_string  = package[:]
579                         else:
580                                 match_string  = package.split("/")[-1]
581
582                         masked=0
583                         if self.searchre.search(match_string):
584                                 if not self.portdb.xmatch("match-visible", package):
585                                         masked=1
586                                 self.matches["pkg"].append([package,masked])
587                         elif self.searchdesc: # DESCRIPTION searching
588                                 full_package = self.portdb.xmatch("bestmatch-visible", package)
589                                 if not full_package:
590                                         #no match found; we don't want to query description
591                                         full_package = portage.best(
592                                                 self.portdb.xmatch("match-all", package))
593                                         if not full_package:
594                                                 continue
595                                         else:
596                                                 masked=1
597                                 try:
598                                         full_desc = self.portdb.aux_get(
599                                                 full_package, ["DESCRIPTION"])[0]
600                                 except KeyError:
601                                         print "emerge: search: aux_get() failed, skipping"
602                                         continue
603                                 if self.searchre.search(full_desc):
604                                         self.matches["desc"].append([full_package,masked])
605
606                 self.sdict = self.setconfig.getSets()
607                 for setname in self.sdict:
608                         self.spinner.update()
609                         if match_category:
610                                 match_string = setname
611                         else:
612                                 match_string = setname.split("/")[-1]
613                         
614                         if self.searchre.search(match_string):
615                                 self.matches["set"].append([setname, False])
616                         elif self.searchdesc:
617                                 if self.searchre.search(
618                                         self.sdict[setname].getMetadata("DESCRIPTION")):
619                                         self.matches["set"].append([setname, False])
620                         
621                 self.mlen=0
622                 for mtype in self.matches:
623                         self.matches[mtype].sort()
624                         self.mlen += len(self.matches[mtype])
625
626         def addCP(self, cp):
627                 if not self.portdb.xmatch("match-all", cp):
628                         return
629                 masked = 0
630                 if not self.portdb.xmatch("bestmatch-visible", cp):
631                         masked = 1
632                 self.matches["pkg"].append([cp, masked])
633                 self.mlen += 1
634
635         def output(self):
636                 """Outputs the results of the search."""
637                 print "\b\b  \n[ Results for search key : "+white(self.searchkey)+" ]"
638                 print "[ Applications found : "+white(str(self.mlen))+" ]"
639                 print " "
640                 vardb = self.vartree.dbapi
641                 for mtype in self.matches:
642                         for match,masked in self.matches[mtype]:
643                                 full_package = None
644                                 if mtype == "pkg":
645                                         catpack = match
646                                         full_package = self.portdb.xmatch(
647                                                 "bestmatch-visible", match)
648                                         if not full_package:
649                                                 #no match found; we don't want to query description
650                                                 masked=1
651                                                 full_package = portage.best(
652                                                         self.portdb.xmatch("match-all",match))
653                                 elif mtype == "desc":
654                                         full_package = match
655                                         match        = portage.cpv_getkey(match)
656                                 elif mtype == "set":
657                                         print green("*")+"  "+white(match)
658                                         print "     ", darkgreen("Description:")+"  ", self.sdict[match].getMetadata("DESCRIPTION")
659                                         print
660                                 if full_package:
661                                         try:
662                                                 desc, homepage, license = self.portdb.aux_get(
663                                                         full_package, ["DESCRIPTION","HOMEPAGE","LICENSE"])
664                                         except KeyError:
665                                                 print "emerge: search: aux_get() failed, skipping"
666                                                 continue
667                                         if masked:
668                                                 print green("*")+"  "+white(match)+" "+red("[ Masked ]")
669                                         else:
670                                                 print green("*")+"  "+white(match)
671                                         myversion = self.getVersion(full_package, search.VERSION_RELEASE)
672
673                                         mysum = [0,0]
674                                         file_size_str = None
675                                         mycat = match.split("/")[0]
676                                         mypkg = match.split("/")[1]
677                                         mycpv = match + "-" + myversion
678                                         myebuild = self.portdb.findname(mycpv)
679                                         if myebuild:
680                                                 pkgdir = os.path.dirname(myebuild)
681                                                 from portage import manifest
682                                                 mf = manifest.Manifest(
683                                                         pkgdir, self.settings["DISTDIR"])
684                                                 try:
685                                                         uri_map = self.portdb.getFetchMap(mycpv)
686                                                 except portage.exception.InvalidDependString, e:
687                                                         file_size_str = "Unknown (%s)" % (e,)
688                                                         del e
689                                                 else:
690                                                         try:
691                                                                 mysum[0] = mf.getDistfilesSize(uri_map)
692                                                         except KeyError, e:
693                                                                 file_size_str = "Unknown (missing " + \
694                                                                         "digest for %s)" % (e,)
695                                                                 del e
696
697                                         available = False
698                                         for db in self._dbs:
699                                                 if db is not vardb and \
700                                                         db.cpv_exists(mycpv):
701                                                         available = True
702                                                         if not myebuild and hasattr(db, "bintree"):
703                                                                 myebuild = db.bintree.getname(mycpv)
704                                                                 try:
705                                                                         mysum[0] = os.stat(myebuild).st_size
706                                                                 except OSError:
707                                                                         myebuild = None
708                                                         break
709
710                                         if myebuild and file_size_str is None:
711                                                 mystr = str(mysum[0] / 1024)
712                                                 mycount = len(mystr)
713                                                 while (mycount > 3):
714                                                         mycount -= 3
715                                                         mystr = mystr[:mycount] + "," + mystr[mycount:]
716                                                 file_size_str = mystr + " kB"
717
718                                         if self.verbose:
719                                                 if available:
720                                                         print "     ", darkgreen("Latest version available:"),myversion
721                                                 print "     ", self.getInstallationStatus(mycat+'/'+mypkg)
722                                                 if myebuild:
723                                                         print "      %s %s" % \
724                                                                 (darkgreen("Size of files:"), file_size_str)
725                                                 print "     ", darkgreen("Homepage:")+"     ",homepage
726                                                 print "     ", darkgreen("Description:")+"  ",desc
727                                                 print "     ", darkgreen("License:")+"      ",license
728                                                 print
729         #
730         # private interface
731         #
732         def getInstallationStatus(self,package):
733                 installed_package = self.vartree.dep_bestmatch(package)
734                 result = ""
735                 version = self.getVersion(installed_package,search.VERSION_RELEASE)
736                 if len(version) > 0:
737                         result = darkgreen("Latest version installed:")+" "+version
738                 else:
739                         result = darkgreen("Latest version installed:")+" [ Not Installed ]"
740                 return result
741
742         def getVersion(self,full_package,detail):
743                 if len(full_package) > 1:
744                         package_parts = portage.catpkgsplit(full_package)
745                         if detail == search.VERSION_RELEASE and package_parts[3] != 'r0':
746                                 result = package_parts[2]+ "-" + package_parts[3]
747                         else:
748                                 result = package_parts[2]
749                 else:
750                         result = ""
751                 return result
752
753 class RootConfig(object):
754         """This is used internally by depgraph to track information about a
755         particular $ROOT."""
756
757         pkg_tree_map = {
758                 "ebuild"    : "porttree",
759                 "binary"    : "bintree",
760                 "installed" : "vartree"
761         }
762
763         tree_pkg_map = {}
764         for k, v in pkg_tree_map.iteritems():
765                 tree_pkg_map[v] = k
766
767         def __init__(self, settings, trees, setconfig):
768                 self.trees = trees
769                 self.settings = settings
770                 self.iuse_implicit = tuple(sorted(settings._get_implicit_iuse()))
771                 self.root = self.settings["ROOT"]
772                 self.setconfig = setconfig
773                 if setconfig is None:
774                         self.sets = {}
775                 else:
776                         self.sets = self.setconfig.getSets()
777                 self.visible_pkgs = PackageVirtualDbapi(self.settings)
778
779 def create_world_atom(pkg, args_set, root_config):
780         """Create a new atom for the world file if one does not exist.  If the
781         argument atom is precise enough to identify a specific slot then a slot
782         atom will be returned. Atoms that are in the system set may also be stored
783         in world since system atoms can only match one slot while world atoms can
784         be greedy with respect to slots.  Unslotted system packages will not be
785         stored in world."""
786
787         arg_atom = args_set.findAtomForPackage(pkg)
788         if not arg_atom:
789                 return None
790         cp = portage.dep_getkey(arg_atom)
791         new_world_atom = cp
792         sets = root_config.sets
793         portdb = root_config.trees["porttree"].dbapi
794         vardb = root_config.trees["vartree"].dbapi
795         available_slots = set(portdb.aux_get(cpv, ["SLOT"])[0] \
796                 for cpv in portdb.match(cp))
797         slotted = len(available_slots) > 1 or \
798                 (len(available_slots) == 1 and "0" not in available_slots)
799         if not slotted:
800                 # check the vdb in case this is multislot
801                 available_slots = set(vardb.aux_get(cpv, ["SLOT"])[0] \
802                         for cpv in vardb.match(cp))
803                 slotted = len(available_slots) > 1 or \
804                         (len(available_slots) == 1 and "0" not in available_slots)
805         if slotted and arg_atom != cp:
806                 # If the user gave a specific atom, store it as a
807                 # slot atom in the world file.
808                 slot_atom = pkg.slot_atom
809
810                 # For USE=multislot, there are a couple of cases to
811                 # handle here:
812                 #
813                 # 1) SLOT="0", but the real SLOT spontaneously changed to some
814                 #    unknown value, so just record an unslotted atom.
815                 #
816                 # 2) SLOT comes from an installed package and there is no
817                 #    matching SLOT in the portage tree.
818                 #
819                 # Make sure that the slot atom is available in either the
820                 # portdb or the vardb, since otherwise the user certainly
821                 # doesn't want the SLOT atom recorded in the world file
822                 # (case 1 above).  If it's only available in the vardb,
823                 # the user may be trying to prevent a USE=multislot
824                 # package from being removed by --depclean (case 2 above).
825
826                 mydb = portdb
827                 if not portdb.match(slot_atom):
828                         # SLOT seems to come from an installed multislot package
829                         mydb = vardb
830                 # If there is no installed package matching the SLOT atom,
831                 # it probably changed SLOT spontaneously due to USE=multislot,
832                 # so just record an unslotted atom.
833                 if vardb.match(slot_atom):
834                         # Now verify that the argument is precise
835                         # enough to identify a specific slot.
836                         matches = mydb.match(arg_atom)
837                         matched_slots = set()
838                         for cpv in matches:
839                                 matched_slots.add(mydb.aux_get(cpv, ["SLOT"])[0])
840                         if len(matched_slots) == 1:
841                                 new_world_atom = slot_atom
842
843         if new_world_atom == sets["world"].findAtomForPackage(pkg):
844                 # Both atoms would be identical, so there's nothing to add.
845                 return None
846         if not slotted:
847                 # Unlike world atoms, system atoms are not greedy for slots, so they
848                 # can't be safely excluded from world if they are slotted.
849                 system_atom = sets["system"].findAtomForPackage(pkg)
850                 if system_atom:
851                         if not portage.dep_getkey(system_atom).startswith("virtual/"):
852                                 return None
853                         # System virtuals aren't safe to exclude from world since they can
854                         # match multiple old-style virtuals but only one of them will be
855                         # pulled in by update or depclean.
856                         providers = portdb.mysettings.getvirtuals().get(
857                                 portage.dep_getkey(system_atom))
858                         if providers and len(providers) == 1 and providers[0] == cp:
859                                 return None
860         return new_world_atom
861
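# Worked illustration (hypothetical package): if the argument atom is specific
# enough to match a single slot of a slotted package, e.g. ">=dev-lang/python-2.5"
# where only SLOT "2.5" matches, create_world_atom() returns the slot atom
# "dev-lang/python:2.5"; a bare "dev-lang/python" argument (arg_atom == cp) falls
# back to the plain cp atom, and unslotted packages already covered by the system
# set yield None.
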
862 def filter_iuse_defaults(iuse):
863         for flag in iuse:
864                 if flag.startswith("+") or flag.startswith("-"):
865                         yield flag[1:]
866                 else:
867                         yield flag
868
869 class SlotObject(object):
870         __slots__ = ("__weakref__",)
871
872         def __init__(self, **kwargs):
873                 classes = [self.__class__]
874                 while classes:
875                         c = classes.pop()
876                         if c is SlotObject:
877                                 continue
878                         classes.extend(c.__bases__)
879                         slots = getattr(c, "__slots__", None)
880                         if not slots:
881                                 continue
882                         for myattr in slots:
883                                 myvalue = kwargs.get(myattr, None)
884                                 setattr(self, myattr, myvalue)
885
886         def copy(self):
887                 """
888                 Create a new instance and copy all attributes
889                 defined from __slots__ (including those from
890                 inherited classes).
891                 """
892                 obj = self.__class__()
893
894                 classes = [self.__class__]
895                 while classes:
896                         c = classes.pop()
897                         if c is SlotObject:
898                                 continue
899                         classes.extend(c.__bases__)
900                         slots = getattr(c, "__slots__", None)
901                         if not slots:
902                                 continue
903                         for myattr in slots:
904                                 setattr(obj, myattr, getattr(self, myattr))
905
906                 return obj
907
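# Illustrative sketch: SlotObject subclasses declare __slots__ and the base
# __init__ maps keyword arguments onto those slots, defaulting missing ones to
# None (the class name below is hypothetical):
#
#     >>> class _Dep(SlotObject):
#     ...     __slots__ = ("atom", "parent")
#     >>> d = _Dep(atom="dev-lang/python")
#     >>> (d.atom, d.parent)
#     ('dev-lang/python', None)
#     >>> d.copy().atom == d.atom
#     True
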
908 class AbstractDepPriority(SlotObject):
909         __slots__ = ("buildtime", "runtime", "runtime_post")
910
911         def __lt__(self, other):
912                 return self.__int__() < other
913
914         def __le__(self, other):
915                 return self.__int__() <= other
916
917         def __eq__(self, other):
918                 return self.__int__() == other
919
920         def __ne__(self, other):
921                 return self.__int__() != other
922
923         def __gt__(self, other):
924                 return self.__int__() > other
925
926         def __ge__(self, other):
927                 return self.__int__() >= other
928
929         def copy(self):
930                 import copy
931                 return copy.copy(self)
932
933 class DepPriority(AbstractDepPriority):
934
935         __slots__ = ("satisfied", "optional", "rebuild")
936
937         def __int__(self):
938                 return 0
939
940         def __str__(self):
941                 if self.optional:
942                         return "optional"
943                 if self.buildtime:
944                         return "buildtime"
945                 if self.runtime:
946                         return "runtime"
947                 if self.runtime_post:
948                         return "runtime_post"
949                 return "soft"
950
951 class BlockerDepPriority(DepPriority):
952         __slots__ = ()
953         def __int__(self):
954                 return 0
955
956         def __str__(self):
957                 return 'blocker'
958
959 BlockerDepPriority.instance = BlockerDepPriority()
960
961 class UnmergeDepPriority(AbstractDepPriority):
962         __slots__ = ("optional", "satisfied",)
963         """
964         Combination of properties           Priority  Category
965
966         runtime                                0       HARD
967         runtime_post                          -1       HARD
968         buildtime                             -2       SOFT
969         (none of the above)                   -2       SOFT
970         """
971
972         MAX    =  0
973         SOFT   = -2
974         MIN    = -2
975
976         def __int__(self):
977                 if self.runtime:
978                         return 0
979                 if self.runtime_post:
980                         return -1
981                 if self.buildtime:
982                         return -2
983                 return -2
984
985         def __str__(self):
986                 myvalue = self.__int__()
987                 if myvalue > self.SOFT:
988                         return "hard"
989                 return "soft"
990
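# Illustrative mapping (matches the table above): runtime dependencies are the
# hardest unmerge constraints, everything else degrades to soft:
#
#     >>> p = UnmergeDepPriority(runtime=True)
#     >>> int(p), str(p)
#     (0, 'hard')
#     >>> p = UnmergeDepPriority(buildtime=True)
#     >>> int(p), str(p)
#     (-2, 'soft')
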
991 class DepPriorityNormalRange(object):
992         """
993         DepPriority properties              Index      Category
994
995         buildtime                                      HARD
996         runtime                                3       MEDIUM
997         runtime_post                           2       MEDIUM_SOFT
998         optional                               1       SOFT
999         (none of the above)                    0       NONE
1000         """
1001         MEDIUM      = 3
1002         MEDIUM_SOFT = 2
1003         SOFT        = 1
1004         NONE        = 0
1005
1006         @classmethod
1007         def _ignore_optional(cls, priority):
1008                 if priority.__class__ is not DepPriority:
1009                         return False
1010                 return bool(priority.optional)
1011
1012         @classmethod
1013         def _ignore_runtime_post(cls, priority):
1014                 if priority.__class__ is not DepPriority:
1015                         return False
1016                 return bool(priority.optional or priority.runtime_post)
1017
1018         @classmethod
1019         def _ignore_runtime(cls, priority):
1020                 if priority.__class__ is not DepPriority:
1021                         return False
1022                 return not priority.buildtime
1023
1024         ignore_medium      = _ignore_runtime
1025         ignore_medium_soft = _ignore_runtime_post
1026         ignore_soft        = _ignore_optional
1027
1028 DepPriorityNormalRange.ignore_priority = (
1029         None,
1030         DepPriorityNormalRange._ignore_optional,
1031         DepPriorityNormalRange._ignore_runtime_post,
1032         DepPriorityNormalRange._ignore_runtime
1033 )
1034
1035 class DepPrioritySatisfiedRange(object):
1036         """
1037         DepPriority                         Index      Category
1038
1039         not satisfied and buildtime                    HARD
1040         not satisfied and runtime              7       MEDIUM
1041         not satisfied and runtime_post         6       MEDIUM_SOFT
1042         satisfied and buildtime and rebuild    5       SOFT
1043         satisfied and buildtime                4       SOFT
1044         satisfied and runtime                  3       SOFT
1045         satisfied and runtime_post             2       SOFT
1046         optional                               1       SOFT
1047         (none of the above)                    0       NONE
1048         """
1049         MEDIUM      = 7
1050         MEDIUM_SOFT = 6
1051         SOFT        = 5
1052         NONE        = 0
1053
1054         @classmethod
1055         def _ignore_optional(cls, priority):
1056                 if priority.__class__ is not DepPriority:
1057                         return False
1058                 return bool(priority.optional)
1059
1060         @classmethod
1061         def _ignore_satisfied_runtime_post(cls, priority):
1062                 if priority.__class__ is not DepPriority:
1063                         return False
1064                 if priority.optional:
1065                         return True
1066                 if not priority.satisfied:
1067                         return False
1068                 return bool(priority.runtime_post)
1069
1070         @classmethod
1071         def _ignore_satisfied_runtime(cls, priority):
1072                 if priority.__class__ is not DepPriority:
1073                         return False
1074                 if priority.optional:
1075                         return True
1076                 if not priority.satisfied:
1077                         return False
1078                 return not priority.buildtime
1079
1080         @classmethod
1081         def _ignore_satisfied_buildtime(cls, priority):
1082                 if priority.__class__ is not DepPriority:
1083                         return False
1084                 if priority.optional:
1085                         return True
1086                 if not priority.satisfied:
1087                         return False
1088                 if priority.buildtime:
1089                         return not priority.rebuild
1090                 return True
1091
1092         @classmethod
1093         def _ignore_satisfied_buildtime_rebuild(cls, priority):
1094                 if priority.__class__ is not DepPriority:
1095                         return False
1096                 if priority.optional:
1097                         return True
1098                 return bool(priority.satisfied)
1099
1100         @classmethod
1101         def _ignore_runtime_post(cls, priority):
1102                 if priority.__class__ is not DepPriority:
1103                         return False
1104                 return bool(priority.optional or \
1105                         priority.satisfied or \
1106                         priority.runtime_post)
1107
1108         @classmethod
1109         def _ignore_runtime(cls, priority):
1110                 if priority.__class__ is not DepPriority:
1111                         return False
1112                 return bool(priority.satisfied or \
1113                         not priority.buildtime)
1114
1115         ignore_medium      = _ignore_runtime
1116         ignore_medium_soft = _ignore_runtime_post
1117         ignore_soft        = _ignore_satisfied_buildtime_rebuild
1118
1119 DepPrioritySatisfiedRange.ignore_priority = (
1120         None,
1121         DepPrioritySatisfiedRange._ignore_optional,
1122         DepPrioritySatisfiedRange._ignore_satisfied_runtime_post,
1123         DepPrioritySatisfiedRange._ignore_satisfied_runtime,
1124         DepPrioritySatisfiedRange._ignore_satisfied_buildtime,
1125         DepPrioritySatisfiedRange._ignore_satisfied_buildtime_rebuild,
1126         DepPrioritySatisfiedRange._ignore_runtime_post,
1127         DepPrioritySatisfiedRange._ignore_runtime
1128 )
1129
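# Illustrative sketch: each ignore_priority tuple provides successively more
# aggressive edge filters for digraph traversal; index 0 ignores nothing and
# higher indices ignore progressively harder dependency edges:
#
#     >>> ignore = DepPrioritySatisfiedRange.ignore_priority[
#     ...     DepPrioritySatisfiedRange.SOFT]
#     >>> ignore(DepPriority(optional=True))
#     True
#     >>> ignore(DepPriority(buildtime=True))
#     False
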
1130 def _find_deep_system_runtime_deps(graph):
1131         deep_system_deps = set()
1132         node_stack = []
1133         for node in graph:
1134                 if not isinstance(node, Package) or \
1135                         node.operation == 'uninstall':
1136                         continue
1137                 if node.root_config.sets['system'].findAtomForPackage(node):
1138                         node_stack.append(node)
1139
1140         def ignore_priority(priority):
1141                 """
1142                 Ignore non-runtime priorities.
1143                 """
1144                 if isinstance(priority, DepPriority) and \
1145                         (priority.runtime or priority.runtime_post):
1146                         return False
1147                 return True
1148
1149         while node_stack:
1150                 node = node_stack.pop()
1151                 if node in deep_system_deps:
1152                         continue
1153                 deep_system_deps.add(node)
1154                 for child in graph.child_nodes(node, ignore_priority=ignore_priority):
1155                         if not isinstance(child, Package) or \
1156                                 child.operation == 'uninstall':
1157                                 continue
1158                         node_stack.append(child)
1159
1160         return deep_system_deps
1161
1162 class FakeVartree(portage.vartree):
1163         """This class implements an in-memory copy of a vartree instance that provides
1164         all the interfaces required for use by the depgraph.  The vardb is locked
1165         during the constructor call just long enough to read a copy of the
1166         installed package information.  This allows the depgraph to do its
1167         dependency calculations without holding a lock on the vardb.  It also
1168         allows things like vardb global updates to be done in memory so that the
1169         user doesn't necessarily need write access to the vardb in cases where
1170         global updates are necessary (updates are performed when necessary if there
1171         is not a matching ebuild in the tree)."""
1172         def __init__(self, root_config, pkg_cache=None, acquire_lock=1):
1173                 self._root_config = root_config
1174                 if pkg_cache is None:
1175                         pkg_cache = {}
1176                 real_vartree = root_config.trees["vartree"]
1177                 portdb = root_config.trees["porttree"].dbapi
1178                 self.root = real_vartree.root
1179                 self.settings = real_vartree.settings
1180                 mykeys = list(real_vartree.dbapi._aux_cache_keys)
1181                 if "_mtime_" not in mykeys:
1182                         mykeys.append("_mtime_")
1183                 self._db_keys = mykeys
1184                 self._pkg_cache = pkg_cache
1185                 self.dbapi = PackageVirtualDbapi(real_vartree.settings)
1186                 vdb_path = os.path.join(self.root, portage.VDB_PATH)
1187                 try:
1188                         # At least the parent needs to exist for the lock file.
1189                         portage.util.ensure_dirs(vdb_path)
1190                 except portage.exception.PortageException:
1191                         pass
1192                 vdb_lock = None
1193                 try:
1194                         if acquire_lock and os.access(vdb_path, os.W_OK):
1195                                 vdb_lock = portage.locks.lockdir(vdb_path)
1196                         real_dbapi = real_vartree.dbapi
1197                         slot_counters = {}
1198                         for cpv in real_dbapi.cpv_all():
1199                                 cache_key = ("installed", self.root, cpv, "nomerge")
1200                                 pkg = self._pkg_cache.get(cache_key)
1201                                 if pkg is not None:
1202                                         metadata = pkg.metadata
1203                                 else:
1204                                         metadata = dict(izip(mykeys, real_dbapi.aux_get(cpv, mykeys)))
1205                                 myslot = metadata["SLOT"]
1206                                 mycp = portage.dep_getkey(cpv)
1207                                 myslot_atom = "%s:%s" % (mycp, myslot)
1208                                 try:
1209                                         mycounter = long(metadata["COUNTER"])
1210                                 except ValueError:
1211                                         mycounter = 0
1212                                         metadata["COUNTER"] = str(mycounter)
1213                                 other_counter = slot_counters.get(myslot_atom, None)
1214                                 if other_counter is not None:
1215                                         if other_counter > mycounter:
1216                                                 continue
1217                                 slot_counters[myslot_atom] = mycounter
1218                                 if pkg is None:
1219                                         pkg = Package(built=True, cpv=cpv,
1220                                                 installed=True, metadata=metadata,
1221                                                 root_config=root_config, type_name="installed")
1222                                 self._pkg_cache[pkg] = pkg
1223                                 self.dbapi.cpv_inject(pkg)
1224                         real_dbapi.flush_cache()
1225                 finally:
1226                         if vdb_lock:
1227                                 portage.locks.unlockdir(vdb_lock)
1228                 # Populate the old-style virtuals using the cached values.
1229                 if not self.settings.treeVirtuals:
1230                         self.settings.treeVirtuals = portage.util.map_dictlist_vals(
1231                                 portage.getCPFromCPV, self.get_all_provides())
1232
1233         # Initialize variables needed for lazy cache pulls of the live ebuild
1234                 # metadata.  This ensures that the vardb lock is released ASAP, without
1235                 # being delayed in case cache generation is triggered.
1236                 self._aux_get = self.dbapi.aux_get
1237                 self.dbapi.aux_get = self._aux_get_wrapper
1238                 self._match = self.dbapi.match
1239                 self.dbapi.match = self._match_wrapper
1240                 self._aux_get_history = set()
1241                 self._portdb_keys = ["EAPI", "DEPEND", "RDEPEND", "PDEPEND"]
1242                 self._portdb = portdb
1243                 self._global_updates = None
1244
1245         def _match_wrapper(self, cpv, use_cache=1):
1246                 """
1247                 Make sure the metadata in Package instances gets updated for any
1248                 cpv that is returned from a match() call, since the metadata can
1249                 be accessed directly from the Package instance instead of via
1250                 aux_get().
1251                 """
1252                 matches = self._match(cpv, use_cache=use_cache)
1253                 for cpv in matches:
1254                         if cpv in self._aux_get_history:
1255                                 continue
1256                         self._aux_get_wrapper(cpv, [])
1257                 return matches
1258
1259         def _aux_get_wrapper(self, pkg, wants):
1260                 if pkg in self._aux_get_history:
1261                         return self._aux_get(pkg, wants)
1262                 self._aux_get_history.add(pkg)
1263                 try:
1264                         # Use the live ebuild metadata if possible.
1265                         live_metadata = dict(izip(self._portdb_keys,
1266                                 self._portdb.aux_get(pkg, self._portdb_keys)))
1267                         if not portage.eapi_is_supported(live_metadata["EAPI"]):
1268                                 raise KeyError(pkg)
1269                         self.dbapi.aux_update(pkg, live_metadata)
1270                 except (KeyError, portage.exception.PortageException):
1271                         if self._global_updates is None:
1272                                 self._global_updates = \
1273                                         grab_global_updates(self._portdb.porttree_root)
1274                         perform_global_updates(
1275                                 pkg, self.dbapi, self._global_updates)
1276                 return self._aux_get(pkg, wants)
1277
1278         def sync(self, acquire_lock=1):
1279                 """
1280                 Call this method to synchronize state with the real vardb
1281                 after one or more packages may have been installed or
1282                 uninstalled.
1283                 """
1284                 vdb_path = os.path.join(self.root, portage.VDB_PATH)
1285                 try:
1286                         # At least the parent needs to exist for the lock file.
1287                         portage.util.ensure_dirs(vdb_path)
1288                 except portage.exception.PortageException:
1289                         pass
1290                 vdb_lock = None
1291                 try:
1292                         if acquire_lock and os.access(vdb_path, os.W_OK):
1293                                 vdb_lock = portage.locks.lockdir(vdb_path)
1294                         self._sync()
1295                 finally:
1296                         if vdb_lock:
1297                                 portage.locks.unlockdir(vdb_lock)
1298
1299         def _sync(self):
1300
1301                 real_vardb = self._root_config.trees["vartree"].dbapi
1302                 current_cpv_set = frozenset(real_vardb.cpv_all())
1303                 pkg_vardb = self.dbapi
1304                 aux_get_history = self._aux_get_history
1305
1306                 # Remove any packages that have been uninstalled.
1307                 for pkg in list(pkg_vardb):
1308                         if pkg.cpv not in current_cpv_set:
1309                                 pkg_vardb.cpv_remove(pkg)
1310                                 aux_get_history.discard(pkg.cpv)
1311
1312                 # Validate counters and timestamps.
1313                 slot_counters = {}
1314                 root = self.root
1315                 validation_keys = ["COUNTER", "_mtime_"]
1316                 for cpv in current_cpv_set:
1317
1318                         pkg_hash_key = ("installed", root, cpv, "nomerge")
1319                         pkg = pkg_vardb.get(pkg_hash_key)
1320                         if pkg is not None:
1321                                 counter, mtime = real_vardb.aux_get(cpv, validation_keys)
1322                                 try:
1323                                         counter = long(counter)
1324                                 except ValueError:
1325                                         counter = 0
1326
1327                                 if counter != pkg.counter or \
1328                                         mtime != pkg.mtime:
1329                                         pkg_vardb.cpv_remove(pkg)
1330                                         aux_get_history.discard(pkg.cpv)
1331                                         pkg = None
1332
1333                         if pkg is None:
1334                                 pkg = self._pkg(cpv)
1335
1336                         other_counter = slot_counters.get(pkg.slot_atom)
1337                         if other_counter is not None:
1338                                 if other_counter > pkg.counter:
1339                                         continue
1340
1341                         slot_counters[pkg.slot_atom] = pkg.counter
1342                         pkg_vardb.cpv_inject(pkg)
1343
1344                 real_vardb.flush_cache()
1345
1346         def _pkg(self, cpv):
1347                 root_config = self._root_config
1348                 real_vardb = root_config.trees["vartree"].dbapi
1349                 pkg = Package(cpv=cpv, installed=True,
1350                         metadata=izip(self._db_keys,
1351                         real_vardb.aux_get(cpv, self._db_keys)),
1352                         root_config=root_config,
1353                         type_name="installed")
1354
1355                 try:
1356                         mycounter = long(pkg.metadata["COUNTER"])
1357                 except ValueError:
1358                         mycounter = 0
1359                         pkg.metadata["COUNTER"] = str(mycounter)
1360
1361                 return pkg
1362
1363 def grab_global_updates(portdir):
1364         from portage.update import grab_updates, parse_updates
1365         updpath = os.path.join(portdir, "profiles", "updates")
1366         try:
1367                 rawupdates = grab_updates(updpath)
1368         except portage.exception.DirectoryNotFound:
1369                 rawupdates = []
1370         upd_commands = []
1371         for mykey, mystat, mycontent in rawupdates:
1372                 commands, errors = parse_updates(mycontent)
1373                 upd_commands.extend(commands)
1374         return upd_commands
1375
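# Apply the update commands gathered above (typically package moves from
# profiles/updates) to the DEPEND/RDEPEND/PDEPEND metadata of a single
# package in the given dbapi, so the in-memory copy reflects the updates
# without requiring write access to the real vardb.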
1376 def perform_global_updates(mycpv, mydb, mycommands):
1377         from portage.update import update_dbentries
1378         aux_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
1379         aux_dict = dict(izip(aux_keys, mydb.aux_get(mycpv, aux_keys)))
1380         updates = update_dbentries(mycommands, aux_dict)
1381         if updates:
1382                 mydb.aux_update(mycpv, updates)
1383
1384 def visible(pkgsettings, pkg):
1385         """
1386         Check if a package is visible. An InvalidDependString exception
1387         raised while checking LICENSE is caught and treated as invisibility.
1388         TODO: optionally generate a list of masking reasons
1389         @rtype: Boolean
1390         @returns: True if the package is visible, False otherwise.
1391         """
1392         if not pkg.metadata["SLOT"]:
1393                 return False
1394         if not pkg.installed:
1395                 if not pkgsettings._accept_chost(pkg.cpv, pkg.metadata):
1396                         return False
1397         eapi = pkg.metadata["EAPI"]
1398         if not portage.eapi_is_supported(eapi):
1399                 return False
1400         if not pkg.installed:
1401                 if portage._eapi_is_deprecated(eapi):
1402                         return False
1403                 if pkgsettings._getMissingKeywords(pkg.cpv, pkg.metadata):
1404                         return False
1405         if pkgsettings._getMaskAtom(pkg.cpv, pkg.metadata):
1406                 return False
1407         if pkgsettings._getProfileMaskAtom(pkg.cpv, pkg.metadata):
1408                 return False
1409         try:
1410                 if pkgsettings._getMissingLicenses(pkg.cpv, pkg.metadata):
1411                         return False
1412         except portage.exception.InvalidDependString:
1413                 return False
1414         return True
1415
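# Illustrative usage (hedged sketch; pkgsettings, pkg and root_config stand
# for objects constructed elsewhere):
#
#     if not visible(pkgsettings, pkg):
#             mreasons = get_masking_status(pkg, pkgsettings, root_config)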
1416 def get_masking_status(pkg, pkgsettings, root_config):
1417
1418         mreasons = portage.getmaskingstatus(
1419                 pkg, settings=pkgsettings,
1420                 portdb=root_config.trees["porttree"].dbapi)
1421
1422         if not pkg.installed:
1423                 if not pkgsettings._accept_chost(pkg.cpv, pkg.metadata):
1424                         mreasons.append("CHOST: %s" % \
1425                                 pkg.metadata["CHOST"])
1426
1427         if not pkg.metadata["SLOT"]:
1428                 mreasons.append("invalid: SLOT is undefined")
1429
1430         return mreasons
1431
1432 def get_mask_info(root_config, cpv, pkgsettings,
1433         db, pkg_type, built, installed, db_keys):
1434         eapi_masked = False
1435         try:
1436                 metadata = dict(izip(db_keys,
1437                         db.aux_get(cpv, db_keys)))
1438         except KeyError:
1439                 metadata = None
1440         if metadata and not built:
1441                 pkgsettings.setcpv(cpv, mydb=metadata)
1442                 metadata["USE"] = pkgsettings["PORTAGE_USE"]
1443                 metadata['CHOST'] = pkgsettings.get('CHOST', '')
1444         if metadata is None:
1445                 mreasons = ["corruption"]
1446         else:
1447                 eapi = metadata['EAPI']
1448                 if eapi[:1] == '-':
1449                         eapi = eapi[1:]
1450                 if not portage.eapi_is_supported(eapi):
1451                         mreasons = ['EAPI %s' % eapi]
1452                 else:
1453                         pkg = Package(type_name=pkg_type, root_config=root_config,
1454                                 cpv=cpv, built=built, installed=installed, metadata=metadata)
1455                         mreasons = get_masking_status(pkg, pkgsettings, root_config)
1456         return metadata, mreasons
1457
1458 def show_masked_packages(masked_packages):
1459         shown_licenses = set()
1460         shown_comments = set()
1461         # Maybe there is both an ebuild and a binary. Only
1462         # show one of them to avoid redundant appearance.
1463         shown_cpvs = set()
1464         have_eapi_mask = False
1465         for (root_config, pkgsettings, cpv,
1466                 metadata, mreasons) in masked_packages:
1467                 if cpv in shown_cpvs:
1468                         continue
1469                 shown_cpvs.add(cpv)
1470                 comment, filename = None, None
1471                 if "package.mask" in mreasons:
1472                         comment, filename = \
1473                                 portage.getmaskingreason(
1474                                 cpv, metadata=metadata,
1475                                 settings=pkgsettings,
1476                                 portdb=root_config.trees["porttree"].dbapi,
1477                                 return_location=True)
1478                 missing_licenses = []
1479                 if metadata:
1480                         if not portage.eapi_is_supported(metadata["EAPI"]):
1481                                 have_eapi_mask = True
1482                         try:
1483                                 missing_licenses = \
1484                                         pkgsettings._getMissingLicenses(
1485                                                 cpv, metadata)
1486                         except portage.exception.InvalidDependString:
1487                                 # This will have already been reported
1488                                 # above via mreasons.
1489                                 pass
1490
1491                 print "- "+cpv+" (masked by: "+", ".join(mreasons)+")"
1492                 if comment and comment not in shown_comments:
1493                         print filename+":"
1494                         print comment
1495                         shown_comments.add(comment)
1496                 portdb = root_config.trees["porttree"].dbapi
1497                 for l in missing_licenses:
1498                         l_path = portdb.findLicensePath(l)
1499                         if l in shown_licenses:
1500                                 continue
1501                         msg = ("A copy of the '%s' license" + \
1502                         " is located at '%s'.") % (l, l_path)
1503                         print msg
1504                         print
1505                         shown_licenses.add(l)
1506         return have_eapi_mask
1507
1508 class Task(SlotObject):
1509         __slots__ = ("_hash_key", "_hash_value")
1510
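        # Subclasses define _hash_key as a tuple (for example
        # ("installed", root, cpv, "nomerge")), which makes Task instances
        # hashable, equal to their own hash-key tuples, and therefore usable
        # interchangeably with those tuples in sets and dictionaries.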
1511         def _get_hash_key(self):
1512                 hash_key = getattr(self, "_hash_key", None)
1513                 if hash_key is None:
1514                         raise NotImplementedError(self)
1515                 return hash_key
1516
1517         def __eq__(self, other):
1518                 return self._get_hash_key() == other
1519
1520         def __ne__(self, other):
1521                 return self._get_hash_key() != other
1522
1523         def __hash__(self):
1524                 hash_value = getattr(self, "_hash_value", None)
1525                 if hash_value is None:
1526                         self._hash_value = hash(self._get_hash_key())
1527                 return self._hash_value
1528
1529         def __len__(self):
1530                 return len(self._get_hash_key())
1531
1532         def __getitem__(self, key):
1533                 return self._get_hash_key()[key]
1534
1535         def __iter__(self):
1536                 return iter(self._get_hash_key())
1537
1538         def __contains__(self, key):
1539                 return key in self._get_hash_key()
1540
1541         def __str__(self):
1542                 return str(self._get_hash_key())
1543
1544 class Blocker(Task):
1545
1546         __hash__ = Task.__hash__
1547         __slots__ = ("root", "atom", "cp", "eapi", "satisfied")
1548
1549         def __init__(self, **kwargs):
1550                 Task.__init__(self, **kwargs)
1551                 self.cp = portage.dep_getkey(self.atom)
1552
1553         def _get_hash_key(self):
1554                 hash_key = getattr(self, "_hash_key", None)
1555                 if hash_key is None:
1556                         self._hash_key = \
1557                                 ("blocks", self.root, self.atom, self.eapi)
1558                 return self._hash_key
1559
1560 class Package(Task):
1561
1562         __hash__ = Task.__hash__
1563         __slots__ = ("built", "cpv", "depth",
1564                 "installed", "metadata", "onlydeps", "operation",
1565                 "root_config", "type_name",
1566                 "category", "counter", "cp", "cpv_split",
1567                 "inherited", "iuse", "mtime",
1568                 "pf", "pv_split", "root", "slot", "slot_atom", "use")
1569
1570         metadata_keys = [
1571                 "CHOST", "COUNTER", "DEPEND", "EAPI",
1572                 "INHERITED", "IUSE", "KEYWORDS",
1573                 "LICENSE", "PDEPEND", "PROVIDE", "RDEPEND",
1574                 "repository", "PROPERTIES", "RESTRICT", "SLOT", "USE", "_mtime_"]
1575
1576         def __init__(self, **kwargs):
1577                 Task.__init__(self, **kwargs)
1578                 self.root = self.root_config.root
1579                 self.metadata = _PackageMetadataWrapper(self, self.metadata)
1580                 self.cp = portage.cpv_getkey(self.cpv)
1581                 slot = self.slot
1582                 if not slot:
1583                         # Avoid an InvalidAtom exception when creating slot_atom.
1584                         # This package instance will be masked due to empty SLOT.
1585                         slot = '0'
1586                 self.slot_atom = portage.dep.Atom("%s:%s" % (self.cp, slot))
1587                 self.category, self.pf = portage.catsplit(self.cpv)
1588                 self.cpv_split = portage.catpkgsplit(self.cpv)
1589                 self.pv_split = self.cpv_split[1:]
1590
1591         class _use(object):
1592
1593                 __slots__ = ("__weakref__", "enabled")
1594
1595                 def __init__(self, use):
1596                         self.enabled = frozenset(use)
1597
1598         class _iuse(object):
1599
1600                 __slots__ = ("__weakref__", "all", "enabled", "disabled", "iuse_implicit", "regex", "tokens")
1601
1602                 def __init__(self, tokens, iuse_implicit):
1603                         self.tokens = tuple(tokens)
1604                         self.iuse_implicit = iuse_implicit
1605                         enabled = []
1606                         disabled = []
1607                         other = []
1608                         for x in tokens:
1609                                 prefix = x[:1]
1610                                 if prefix == "+":
1611                                         enabled.append(x[1:])
1612                                 elif prefix == "-":
1613                                         disabled.append(x[1:])
1614                                 else:
1615                                         other.append(x)
1616                         self.enabled = frozenset(enabled)
1617                         self.disabled = frozenset(disabled)
1618                         self.all = frozenset(chain(enabled, disabled, other))
1619
1620                 def __getattribute__(self, name):
1621                         if name == "regex":
1622                                 try:
1623                                         return object.__getattribute__(self, "regex")
1624                                 except AttributeError:
1625                                         all = object.__getattribute__(self, "all")
1626                                         iuse_implicit = object.__getattribute__(self, "iuse_implicit")
1627                                         # Escape anything except ".*" which is supposed
1628                                         # to pass through from _get_implicit_iuse()
1629                                         regex = (re.escape(x) for x in chain(all, iuse_implicit))
1630                                         regex = "^(%s)$" % "|".join(regex)
1631                                         regex = regex.replace("\\.\\*", ".*")
1632                                         self.regex = re.compile(regex)
1633                         return object.__getattribute__(self, name)
1634
1635         def _get_hash_key(self):
1636                 hash_key = getattr(self, "_hash_key", None)
1637                 if hash_key is None:
1638                         if self.operation is None:
1639                                 self.operation = "merge"
1640                                 if self.onlydeps or self.installed:
1641                                         self.operation = "nomerge"
1642                         self._hash_key = \
1643                                 (self.type_name, self.root, self.cpv, self.operation)
1644                 return self._hash_key
1645
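        # The rich comparisons below are only meaningful between packages that
        # share the same category/package name; when the cp values differ,
        # every comparison simply returns False instead of raising.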
1646         def __lt__(self, other):
1647                 if other.cp != self.cp:
1648                         return False
1649                 if portage.pkgcmp(self.pv_split, other.pv_split) < 0:
1650                         return True
1651                 return False
1652
1653         def __le__(self, other):
1654                 if other.cp != self.cp:
1655                         return False
1656                 if portage.pkgcmp(self.pv_split, other.pv_split) <= 0:
1657                         return True
1658                 return False
1659
1660         def __gt__(self, other):
1661                 if other.cp != self.cp:
1662                         return False
1663                 if portage.pkgcmp(self.pv_split, other.pv_split) > 0:
1664                         return True
1665                 return False
1666
1667         def __ge__(self, other):
1668                 if other.cp != self.cp:
1669                         return False
1670                 if portage.pkgcmp(self.pv_split, other.pv_split) >= 0:
1671                         return True
1672                 return False
1673
1674 _all_metadata_keys = set(x for x in portage.auxdbkeys \
1675         if not x.startswith("UNUSED_"))
1676 _all_metadata_keys.discard("CDEPEND")
1677 _all_metadata_keys.update(Package.metadata_keys)
1678
1679 from portage.cache.mappings import slot_dict_class
1680 _PackageMetadataWrapperBase = slot_dict_class(_all_metadata_keys)
1681
1682 class _PackageMetadataWrapper(_PackageMetadataWrapperBase):
1683         """
1684         Detect metadata updates and synchronize Package attributes.
1685         """
1686
1687         __slots__ = ("_pkg",)
1688         _wrapped_keys = frozenset(
1689                 ["COUNTER", "INHERITED", "IUSE", "SLOT", "USE", "_mtime_"])
1690
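        # Assigning one of the wrapped keys is routed through the matching
        # _set_<key>() method below, which parses the raw string value and
        # mirrors it onto the owning Package (e.g. pkg.counter, pkg.iuse,
        # pkg.slot, pkg.use, pkg.mtime).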
1691         def __init__(self, pkg, metadata):
1692                 _PackageMetadataWrapperBase.__init__(self)
1693                 self._pkg = pkg
1694                 self.update(metadata)
1695
1696         def __setitem__(self, k, v):
1697                 _PackageMetadataWrapperBase.__setitem__(self, k, v)
1698                 if k in self._wrapped_keys:
1699                         getattr(self, "_set_" + k.lower())(k, v)
1700
1701         def _set_inherited(self, k, v):
1702                 if isinstance(v, basestring):
1703                         v = frozenset(v.split())
1704                 self._pkg.inherited = v
1705
1706         def _set_iuse(self, k, v):
1707                 self._pkg.iuse = self._pkg._iuse(
1708                         v.split(), self._pkg.root_config.iuse_implicit)
1709
1710         def _set_slot(self, k, v):
1711                 self._pkg.slot = v
1712
1713         def _set_use(self, k, v):
1714                 self._pkg.use = self._pkg._use(v.split())
1715
1716         def _set_counter(self, k, v):
1717                 if isinstance(v, basestring):
1718                         try:
1719                                 v = long(v.strip())
1720                         except ValueError:
1721                                 v = 0
1722                 self._pkg.counter = v
1723
1724         def _set__mtime_(self, k, v):
1725                 if isinstance(v, basestring):
1726                         try:
1727                                 v = long(v.strip())
1728                         except ValueError:
1729                                 v = 0
1730                 self._pkg.mtime = v
1731
1732 class EbuildFetchonly(SlotObject):
1733
1734         __slots__ = ("fetch_all", "pkg", "pretend", "settings")
1735
1736         def execute(self):
1737                 settings = self.settings
1738                 pkg = self.pkg
1739                 portdb = pkg.root_config.trees["porttree"].dbapi
1740                 ebuild_path = portdb.findname(pkg.cpv)
1741                 settings.setcpv(pkg)
1742                 debug = settings.get("PORTAGE_DEBUG") == "1"
1743                 use_cache = 1 # always true
1744                 portage.doebuild_environment(ebuild_path, "fetch",
1745                         settings["ROOT"], settings, debug, use_cache, portdb)
1746                 restrict_fetch = 'fetch' in settings['PORTAGE_RESTRICT'].split()
1747
1748                 if restrict_fetch:
1749                         rval = self._execute_with_builddir()
1750                 else:
1751                         rval = portage.doebuild(ebuild_path, "fetch",
1752                                 settings["ROOT"], settings, debug=debug,
1753                                 listonly=self.pretend, fetchonly=1, fetchall=self.fetch_all,
1754                                 mydbapi=portdb, tree="porttree")
1755
1756                         if rval != os.EX_OK:
1757                                 msg = "Fetch failed for '%s'" % (pkg.cpv,)
1758                                 eerror(msg, phase="unpack", key=pkg.cpv)
1759
1760                 return rval
1761
1762         def _execute_with_builddir(self):
1763                 # To spawn pkg_nofetch requires PORTAGE_BUILDDIR for
1764                 # ensuring sane $PWD (bug #239560) and storing elog
1765                 # messages. Use a private temp directory, in order
1766                 # to avoid locking the main one.
1767                 settings = self.settings
1768                 global_tmpdir = settings["PORTAGE_TMPDIR"]
1769                 from tempfile import mkdtemp
1770                 try:
1771                         private_tmpdir = mkdtemp("", "._portage_fetch_.", global_tmpdir)
1772                 except OSError, e:
1773                         if e.errno != portage.exception.PermissionDenied.errno:
1774                                 raise
1775                         raise portage.exception.PermissionDenied(global_tmpdir)
1776                 settings["PORTAGE_TMPDIR"] = private_tmpdir
1777                 settings.backup_changes("PORTAGE_TMPDIR")
1778                 try:
1779                         retval = self._execute()
1780                 finally:
1781                         settings["PORTAGE_TMPDIR"] = global_tmpdir
1782                         settings.backup_changes("PORTAGE_TMPDIR")
1783                         shutil.rmtree(private_tmpdir)
1784                 return retval
1785
1786         def _execute(self):
1787                 settings = self.settings
1788                 pkg = self.pkg
1789                 root_config = pkg.root_config
1790                 portdb = root_config.trees["porttree"].dbapi
1791                 ebuild_path = portdb.findname(pkg.cpv)
1792                 debug = settings.get("PORTAGE_DEBUG") == "1"
1793                 retval = portage.doebuild(ebuild_path, "fetch",
1794                         self.settings["ROOT"], self.settings, debug=debug,
1795                         listonly=self.pretend, fetchonly=1, fetchall=self.fetch_all,
1796                         mydbapi=portdb, tree="porttree")
1797
1798                 if retval != os.EX_OK:
1799                         msg = "Fetch failed for '%s'" % (pkg.cpv,)
1800                         eerror(msg, phase="unpack", key=pkg.cpv)
1801
1802                 portage.elog.elog_process(self.pkg.cpv, self.settings)
1803                 return retval
1804
1805 class PollConstants(object):
1806
1807         """
1808         Provides POLL* constants that are equivalent to those from the
1809         select module, for use by PollSelectAdapter.
1810         """
1811
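        # Use the value from the select module when it is available; otherwise
        # fall back to a distinct power-of-two value so the constants can
        # still be combined and tested as bit flags.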
1812         names = ("POLLIN", "POLLPRI", "POLLOUT", "POLLERR", "POLLHUP", "POLLNVAL")
1813         v = 1
1814         for k in names:
1815                 locals()[k] = getattr(select, k, v)
1816                 v *= 2
1817         del k, v
1818
1819 class AsynchronousTask(SlotObject):
1820         """
1821         Subclasses override _wait() and _poll() so that calls
1822         to public methods can be wrapped for implementing
1823         hooks such as exit listener notification.
1824
1825         Subclasses should call self.wait() to notify exit listeners after
1826         the task is complete and self.returncode has been set.
1827         """
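        # Minimal subclass sketch (illustrative only, not part of the original
        # source): override _start(), set self.returncode when the work is
        # done, and call self.wait() so that exit listeners are notified.
        #
        #     class NoopTask(AsynchronousTask):
        #             def _start(self):
        #                     self.returncode = os.EX_OK
        #                     self.wait()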
1828
1829         __slots__ = ("background", "cancelled", "returncode") + \
1830                 ("_exit_listeners", "_exit_listener_stack", "_start_listeners")
1831
1832         def start(self):
1833                 """
1834                 Start an asynchronous task and then return as soon as possible.
1835                 """
1836                 self._start_hook()
1837                 self._start()
1838
1839         def _start(self):
1840                 raise NotImplementedError(self)
1841
1842         def isAlive(self):
1843                 return self.returncode is None
1844
1845         def poll(self):
1846                 self._wait_hook()
1847                 return self._poll()
1848
1849         def _poll(self):
1850                 return self.returncode
1851
1852         def wait(self):
1853                 if self.returncode is None:
1854                         self._wait()
1855                 self._wait_hook()
1856                 return self.returncode
1857
1858         def _wait(self):
1859                 return self.returncode
1860
1861         def cancel(self):
1862                 self.cancelled = True
1863                 self.wait()
1864
1865         def addStartListener(self, f):
1866                 """
1867                 The function will be called with one argument, a reference to self.
1868                 """
1869                 if self._start_listeners is None:
1870                         self._start_listeners = []
1871                 self._start_listeners.append(f)
1872
1873         def removeStartListener(self, f):
1874                 if self._start_listeners is None:
1875                         return
1876                 self._start_listeners.remove(f)
1877
1878         def _start_hook(self):
1879                 if self._start_listeners is not None:
1880                         start_listeners = self._start_listeners
1881                         self._start_listeners = None
1882
1883                         for f in start_listeners:
1884                                 f(self)
1885
1886         def addExitListener(self, f):
1887                 """
1888                 The function will be called with one argument, a reference to self.
1889                 """
1890                 if self._exit_listeners is None:
1891                         self._exit_listeners = []
1892                 self._exit_listeners.append(f)
1893
1894         def removeExitListener(self, f):
1895                 if self._exit_listeners is None:
1896                         if self._exit_listener_stack is not None:
1897                                 self._exit_listener_stack.remove(f)
1898                         return
1899                 self._exit_listeners.remove(f)
1900
1901         def _wait_hook(self):
1902                 """
1903                 Call this method after the task completes, just before returning
1904                 the returncode from wait() or poll(). This hook is
1905                 used to trigger exit listeners when the returncode first
1906                 becomes available.
1907                 """
1908                 if self.returncode is not None and \
1909                         self._exit_listeners is not None:
1910
1911                         # This prevents recursion, in case one of the
1912                         # exit handlers triggers this method again by
1913                         # calling wait(). Use a stack that gives
1914                         # removeExitListener() an opportunity to consume
1915                         # listeners from the stack, before they can get
1916                         # called below. This is necessary because a call
1917                         # to one exit listener may result in a call to
1918                         # removeExitListener() for another listener on
1919                         # the stack. That listener needs to be removed
1920                         # from the stack since it would be inconsistent
1921                         # to call it after it has been passed into
1922                         # removeExitListener().
1923                         self._exit_listener_stack = self._exit_listeners
1924                         self._exit_listeners = None
1925
1926                         self._exit_listener_stack.reverse()
1927                         while self._exit_listener_stack:
1928                                 self._exit_listener_stack.pop()(self)
1929
1930 class AbstractPollTask(AsynchronousTask):
1931
1932         __slots__ = ("scheduler",) + \
1933                 ("_registered",)
1934
1935         _bufsize = 4096
1936         _exceptional_events = PollConstants.POLLERR | PollConstants.POLLNVAL
1937         _registered_events = PollConstants.POLLIN | PollConstants.POLLHUP | \
1938                 _exceptional_events
1939
1940         def _unregister(self):
1941                 raise NotImplementedError(self)
1942
1943         def _unregister_if_appropriate(self, event):
1944                 if self._registered:
1945                         if event & self._exceptional_events:
1946                                 self._unregister()
1947                                 self.cancel()
1948                         elif event & PollConstants.POLLHUP:
1949                                 self._unregister()
1950                                 self.wait()
1951
1952 class PipeReader(AbstractPollTask):
1953
1954         """
1955         Reads output from one or more files and saves it in memory,
1956         for retrieval via the getvalue() method. This is driven by
1957         the scheduler's poll() loop, so it runs entirely within the
1958         current process.
1959         """
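        # Illustrative usage (hedged sketch; sched stands for the scheduler
        # that drives the poll() loop and master_file for a file object
        # wrapping the read end of a pipe):
        #
        #     reader = PipeReader(input_files={"pipe": master_file},
        #             scheduler=sched)
        #     reader.start()
        #     reader.wait()
        #     output = reader.getvalue()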
1960
1961         __slots__ = ("input_files",) + \
1962                 ("_read_data", "_reg_ids")
1963
1964         def _start(self):
1965                 self._reg_ids = set()
1966                 self._read_data = []
1967                 for k, f in self.input_files.iteritems():
1968                         fcntl.fcntl(f.fileno(), fcntl.F_SETFL,
1969                                 fcntl.fcntl(f.fileno(), fcntl.F_GETFL) | os.O_NONBLOCK)
1970                         self._reg_ids.add(self.scheduler.register(f.fileno(),
1971                                 self._registered_events, self._output_handler))
1972                 self._registered = True
1973
1974         def isAlive(self):
1975                 return self._registered
1976
1977         def cancel(self):
1978                 if self.returncode is None:
1979                         self.returncode = 1
1980                         self.cancelled = True
1981                 self.wait()
1982
1983         def _wait(self):
1984                 if self.returncode is not None:
1985                         return self.returncode
1986
1987                 if self._registered:
1988                         self.scheduler.schedule(self._reg_ids)
1989                         self._unregister()
1990
1991                 self.returncode = os.EX_OK
1992                 return self.returncode
1993
1994         def getvalue(self):
1995                 """Retrieve the entire contents"""
1996                 if sys.hexversion >= 0x3000000:
1997                         return bytes().join(self._read_data)
1998                 return "".join(self._read_data)
1999
2000         def close(self):
2001                 """Free the memory buffer."""
2002                 self._read_data = None
2003
2004         def _output_handler(self, fd, event):
2005
2006                 if event & PollConstants.POLLIN:
2007
2008                         for f in self.input_files.itervalues():
2009                                 if fd == f.fileno():
2010                                         break
2011
2012                         buf = array.array('B')
2013                         try:
2014                                 buf.fromfile(f, self._bufsize)
2015                         except EOFError:
2016                                 pass
2017
2018                         if buf:
2019                                 self._read_data.append(buf.tostring())
2020                         else:
2021                                 self._unregister()
2022                                 self.wait()
2023
2024                 self._unregister_if_appropriate(event)
2025                 return self._registered
2026
2027         def _unregister(self):
2028                 """
2029                 Unregister from the scheduler and close open files.
2030                 """
2031
2032                 self._registered = False
2033
2034                 if self._reg_ids is not None:
2035                         for reg_id in self._reg_ids:
2036                                 self.scheduler.unregister(reg_id)
2037                         self._reg_ids = None
2038
2039                 if self.input_files is not None:
2040                         for f in self.input_files.itervalues():
2041                                 f.close()
2042                         self.input_files = None
2043
2044 class CompositeTask(AsynchronousTask):
2045
2046         __slots__ = ("scheduler",) + ("_current_task",)
2047
2048         def isAlive(self):
2049                 return self._current_task is not None
2050
2051         def cancel(self):
2052                 self.cancelled = True
2053                 if self._current_task is not None:
2054                         self._current_task.cancel()
2055
2056         def _poll(self):
2057                 """
2058                 This does a loop calling self._current_task.poll()
2059                 repeatedly as long as the value of self._current_task
2060                 keeps changing. It calls poll() a maximum of one time
2061                 for a given self._current_task instance. This is useful
2062                 since calling poll() on a task can trigger advance to
2063                 the next task, which could eventually lead to the returncode
2064                 being set in cases when polling only a single task would
2065                 not have the same effect.
2066                 """
2067
2068                 prev = None
2069                 while True:
2070                         task = self._current_task
2071                         if task is None or task is prev:
2072                                 # don't poll the same task more than once
2073                                 break
2074                         task.poll()
2075                         prev = task
2076
2077                 return self.returncode
2078
2079         def _wait(self):
2080
2081                 prev = None
2082                 while True:
2083                         task = self._current_task
2084                         if task is None:
2085                                 # don't wait for the same task more than once
2086                                 break
2087                         if task is prev:
2088                                 # Before the task.wait() method returned, an exit
2089                                 # listener should have set self._current_task to either
2090                                 # a different task or None. Something is wrong.
2091                                 raise AssertionError("self._current_task has not " + \
2092                                         "changed since calling wait", self, task)
2093                         task.wait()
2094                         prev = task
2095
2096                 return self.returncode
2097
2098         def _assert_current(self, task):
2099                 """
2100                 Raises an AssertionError if the given task is not the
2101                 same one as self._current_task. This can be useful
2102                 for detecting bugs.
2103                 """
2104                 if task is not self._current_task:
2105                         raise AssertionError("Unrecognized task: %s" % (task,))
2106
2107         def _default_exit(self, task):
2108                 """
2109                 Calls _assert_current() on the given task and then sets the
2110                 composite returncode attribute if task.returncode != os.EX_OK.
2111                 If the task failed then self._current_task will be set to None.
2112                 Subclasses can use this as a generic task exit callback.
2113
2114                 @rtype: int
2115                 @returns: The task.returncode attribute.
2116                 """
2117                 self._assert_current(task)
2118                 if task.returncode != os.EX_OK:
2119                         self.returncode = task.returncode
2120                         self._current_task = None
2121                 return task.returncode
2122
2123         def _final_exit(self, task):
2124                 """
2125                 Assumes that task is the final task of this composite task.
2126                 Calls _default_exit() and sets self.returncode to the task's
2127                 returncode and sets self._current_task to None.
2128                 """
2129                 self._default_exit(task)
2130                 self._current_task = None
2131                 self.returncode = task.returncode
2132                 return self.returncode
2133
2134         def _default_final_exit(self, task):
2135                 """
2136                 This calls _final_exit() and then wait().
2137
2138                 Subclasses can use this as a generic final task exit callback.
2139
2140                 """
2141                 self._final_exit(task)
2142                 return self.wait()
2143
2144         def _start_task(self, task, exit_handler):
2145                 """
2146                 Register exit handler for the given task, set it
2147                 as self._current_task, and call task.start().
2148
2149                 Subclasses can use this as a generic way to start
2150                 a task.
2151
2152                 """
2153                 task.addExitListener(exit_handler)
2154                 self._current_task = task
2155                 task.start()
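        # Illustrative chaining sketch (not part of the original source):
        # a subclass typically starts its first task from _start() and
        # advances from the exit handler, for example:
        #
        #     def _start(self):
        #             self._start_task(first_task, self._first_exit)
        #
        #     def _first_exit(self, first_task):
        #             if self._default_exit(first_task) != os.EX_OK:
        #                     self.wait()
        #                     return
        #             self._start_task(next_task, self._default_final_exit)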
2156
2157 class TaskSequence(CompositeTask):
2158         """
2159         A collection of tasks that executes sequentially. Each task
2160         must have an addExitListener() method that can be used as
2161         a means to trigger movement from one task to the next.
2162         """
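        # Illustrative usage (hedged sketch; sched stands for the scheduler
        # that drives the individual tasks):
        #
        #     seq = TaskSequence(scheduler=sched)
        #     seq.add(task_a)
        #     seq.add(task_b)
        #     seq.start()  # task_b starts only after task_a exits successfully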
2163
2164         __slots__ = ("_task_queue",)
2165
2166         def __init__(self, **kwargs):
2167                 AsynchronousTask.__init__(self, **kwargs)
2168                 self._task_queue = deque()
2169
2170         def add(self, task):
2171                 self._task_queue.append(task)
2172
2173         def _start(self):
2174                 self._start_next_task()
2175
2176         def cancel(self):
2177                 self._task_queue.clear()
2178                 CompositeTask.cancel(self)
2179
2180         def _start_next_task(self):
2181                 self._start_task(self._task_queue.popleft(),
2182                         self._task_exit_handler)
2183
2184         def _task_exit_handler(self, task):
2185                 if self._default_exit(task) != os.EX_OK:
2186                         self.wait()
2187                 elif self._task_queue:
2188                         self._start_next_task()
2189                 else:
2190                         self._final_exit(task)
2191                         self.wait()
2192
2193 class SubProcess(AbstractPollTask):
2194
2195         __slots__ = ("pid",) + \
2196                 ("_files", "_reg_id")
2197
2198         # A file descriptor is required for the scheduler to monitor changes from
2199         # inside a poll() loop. When logging is not enabled, create a pipe just to
2200         # serve this purpose alone.
2201         _dummy_pipe_fd = 9
2202
2203         def _poll(self):
2204                 if self.returncode is not None:
2205                         return self.returncode
2206                 if self.pid is None:
2207                         return self.returncode
2208                 if self._registered:
2209                         return self.returncode
2210
2211                 try:
2212                         retval = os.waitpid(self.pid, os.WNOHANG)
2213                 except OSError, e:
2214                         if e.errno != errno.ECHILD:
2215                                 raise
2216                         del e
2217                         retval = (self.pid, 1)
2218
2219                 if retval == (0, 0):
2220                         return None
2221                 self._set_returncode(retval)
2222                 return self.returncode
2223
2224         def cancel(self):
2225                 if self.isAlive():
2226                         try:
2227                                 os.kill(self.pid, signal.SIGTERM)
2228                         except OSError, e:
2229                                 if e.errno != errno.ESRCH:
2230                                         raise
2231                                 del e
2232
2233                 self.cancelled = True
2234                 if self.pid is not None:
2235                         self.wait()
2236                 return self.returncode
2237
2238         def isAlive(self):
2239                 return self.pid is not None and \
2240                         self.returncode is None
2241
2242         def _wait(self):
2243
2244                 if self.returncode is not None:
2245                         return self.returncode
2246
2247                 if self._registered:
2248                         self.scheduler.schedule(self._reg_id)
2249                         self._unregister()
2250                         if self.returncode is not None:
2251                                 return self.returncode
2252
2253                 try:
2254                         wait_retval = os.waitpid(self.pid, 0)
2255                 except OSError, e:
2256                         if e.errno != errno.ECHILD:
2257                                 raise
2258                         del e
2259                         self._set_returncode((self.pid, 1))
2260                 else:
2261                         self._set_returncode(wait_retval)
2262
2263                 return self.returncode
2264
2265         def _unregister(self):
2266                 """
2267                 Unregister from the scheduler and close open files.
2268                 """
2269
2270                 self._registered = False
2271
2272                 if self._reg_id is not None:
2273                         self.scheduler.unregister(self._reg_id)
2274                         self._reg_id = None
2275
2276                 if self._files is not None:
2277                         for f in self._files.itervalues():
2278                                 f.close()
2279                         self._files = None
2280
2281         def _set_returncode(self, wait_retval):
2282
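                # wait_retval is the (pid, status) pair from os.waitpid(). A
                # normal exit stores the exit code in the high byte of status,
                # while death by signal sets the low byte; the conversion below
                # folds both cases into a single returncode, with os.EX_OK
                # indicating success.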
2283                 retval = wait_retval[1]
2284
2285                 if retval != os.EX_OK:
2286                         if retval & 0xff:
2287                                 retval = (retval & 0xff) << 8
2288                         else:
2289                                 retval = retval >> 8
2290
2291                 self.returncode = retval
2292
2293 class SpawnProcess(SubProcess):
2294
2295         """
2296         Constructor keyword args are passed into portage.process.spawn().
2297         The required "args" keyword argument will be passed as the first
2298         spawn() argument.
2299         """
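        # Illustrative usage (hedged sketch; sched stands for the scheduler
        # that polls the spawned process, and the argument list is an
        # arbitrary example):
        #
        #     proc = SpawnProcess(args=["true"], scheduler=sched)
        #     proc.start()
        #     proc.wait()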
2300
2301         _spawn_kwarg_names = ("env", "opt_name", "fd_pipes",
2302                 "uid", "gid", "groups", "umask", "logfile",
2303                 "path_lookup", "pre_exec")
2304
2305         __slots__ = ("args",) + \
2306                 _spawn_kwarg_names
2307
2308         _file_names = ("log", "process", "stdout")
2309         _files_dict = slot_dict_class(_file_names, prefix="")
2310
2311         def _start(self):
2312
2313                 if self.cancelled:
2314                         return
2315
2316                 if self.fd_pipes is None:
2317                         self.fd_pipes = {}
2318                 fd_pipes = self.fd_pipes
2319                 fd_pipes.setdefault(0, sys.stdin.fileno())
2320                 fd_pipes.setdefault(1, sys.stdout.fileno())
2321                 fd_pipes.setdefault(2, sys.stderr.fileno())
2322
2323                 # flush any pending output
2324                 for fd in fd_pipes.itervalues():
2325                         if fd == sys.stdout.fileno():
2326                                 sys.stdout.flush()
2327                         if fd == sys.stderr.fileno():
2328                                 sys.stderr.flush()
2329
2330                 logfile = self.logfile
2331                 self._files = self._files_dict()
2332                 files = self._files
2333
2334                 master_fd, slave_fd = self._pipe(fd_pipes)
2335                 fcntl.fcntl(master_fd, fcntl.F_SETFL,
2336                         fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
2337
2338                 null_input = None
2339                 fd_pipes_orig = fd_pipes.copy()
2340                 if self.background:
2341                         # TODO: Use job control functions like tcsetpgrp() to control
2342                         # access to stdin. Until then, use /dev/null so that any
2343                         # attempts to read from stdin will immediately return EOF
2344                         # instead of blocking indefinitely.
2345                         null_input = open('/dev/null', 'rb')
2346                         fd_pipes[0] = null_input.fileno()
2347                 else:
2348                         fd_pipes[0] = fd_pipes_orig[0]
2349
2350                 files.process = os.fdopen(master_fd, 'rb')
2351                 if logfile is not None:
2352
2353                         fd_pipes[1] = slave_fd
2354                         fd_pipes[2] = slave_fd
2355
2356                         files.log = open(logfile, mode='ab')
2357                         portage.util.apply_secpass_permissions(logfile,
2358                                 uid=portage.portage_uid, gid=portage.portage_gid,
2359                                 mode=0660)
2360
2361                         if not self.background:
2362                                 files.stdout = os.fdopen(os.dup(fd_pipes_orig[1]), 'wb')
2363
2364                         output_handler = self._output_handler
2365
2366                 else:
2367
2368                         # Create a dummy pipe so the scheduler can monitor
2369                         # the process from inside a poll() loop.
2370                         fd_pipes[self._dummy_pipe_fd] = slave_fd
2371                         if self.background:
2372                                 fd_pipes[1] = slave_fd
2373                                 fd_pipes[2] = slave_fd
2374                         output_handler = self._dummy_handler
2375
2376                 kwargs = {}
2377                 for k in self._spawn_kwarg_names:
2378                         v = getattr(self, k)
2379                         if v is not None:
2380                                 kwargs[k] = v
2381
2382                 kwargs["fd_pipes"] = fd_pipes
2383                 kwargs["returnpid"] = True
2384                 kwargs.pop("logfile", None)
2385
2386                 self._reg_id = self.scheduler.register(files.process.fileno(),
2387                         self._registered_events, output_handler)
2388                 self._registered = True
2389
2390                 retval = self._spawn(self.args, **kwargs)
2391
2392                 os.close(slave_fd)
2393                 if null_input is not None:
2394                         null_input.close()
2395
2396                 if isinstance(retval, int):
2397                         # spawn failed
2398                         self._unregister()
2399                         self.returncode = retval
2400                         self.wait()
2401                         return
2402
2403                 self.pid = retval[0]
2404                 portage.process.spawned_pids.remove(self.pid)
2405
2406         def _pipe(self, fd_pipes):
2407                 """
2408                 @type fd_pipes: dict
2409                 @param fd_pipes: pipes from which to copy terminal size if desired.
2410                 """
2411                 return os.pipe()
2412
2413         def _spawn(self, args, **kwargs):
2414                 return portage.process.spawn(args, **kwargs)
2415
2416         def _output_handler(self, fd, event):
2417
2418                 if event & PollConstants.POLLIN:
2419
2420                         files = self._files
2421                         buf = array.array('B')
2422                         try:
2423                                 buf.fromfile(files.process, self._bufsize)
2424                         except EOFError:
2425                                 pass
2426
2427                         if buf:
2428                                 if not self.background:
2429                                         write_successful = False
2430                                         failures = 0
2431                                         while True:
2432                                                 try:
2433                                                         if not write_successful:
2434                                                                 buf.tofile(files.stdout)
2435                                                                 write_successful = True
2436                                                         files.stdout.flush()
2437                                                         break
2438                                                 except IOError, e:
2439                                                         if e.errno != errno.EAGAIN:
2440                                                                 raise
2441                                                         del e
2442                                                         failures += 1
2443                                                         if failures > 50:
2444                                                                 # Avoid a potentially infinite loop. In
2445                                                                 # most cases, the failure count is zero
2446                                                                 # and it's unlikely to exceed 1.
2447                                                                 raise
2448
2449                                                         # This means that a subprocess has put an inherited
2450                                                         # stdio file descriptor (typically stdin) into
2451                                                         # O_NONBLOCK mode. This is not acceptable (see bug
2452                                                         # #264435), so revert it. We need to use a loop
2453                                                         # here since there's a race condition due to
2454                                                         # parallel processes being able to change the
2455                                                         # flags on the inherited file descriptor.
2456                                                         # TODO: When possible, avoid having child processes
2457                                                         # inherit stdio file descriptors from portage
2458                                                         # (maybe it can't be avoided with
2459                                                         # PROPERTIES=interactive).
2460                                                         fcntl.fcntl(files.stdout.fileno(), fcntl.F_SETFL,
2461                                                                 fcntl.fcntl(files.stdout.fileno(),
2462                                                                 fcntl.F_GETFL) ^ os.O_NONBLOCK)
2463
2464                                 buf.tofile(files.log)
2465                                 files.log.flush()
2466                         else:
2467                                 self._unregister()
2468                                 self.wait()
2469
2470                 self._unregister_if_appropriate(event)
2471                 return self._registered
2472
2473         def _dummy_handler(self, fd, event):
2474                 """
2475                 This method is mainly interested in detecting EOF, since
2476                 the only purpose of the pipe is to allow the scheduler to
2477                 monitor the process from inside a poll() loop.
2478                 """
2479
2480                 if event & PollConstants.POLLIN:
2481
2482                         buf = array.array('B')
2483                         try:
2484                                 buf.fromfile(self._files.process, self._bufsize)
2485                         except EOFError:
2486                                 pass
2487
2488                         if buf:
2489                                 pass
2490                         else:
2491                                 self._unregister()
2492                                 self.wait()
2493
2494                 self._unregister_if_appropriate(event)
2495                 return self._registered
2496
2497 class MiscFunctionsProcess(SpawnProcess):
2498         """
2499         Spawns misc-functions.sh with an existing ebuild environment.
2500         """
2501
2502         __slots__ = ("commands", "phase", "pkg", "settings")
2503
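        # Usage sketch (based on EbuildPhase._ebuild_exit below): the "commands"
        # list holds the misc-functions.sh commands to run for a phase, e.g.
        #
        #   post_phase = MiscFunctionsProcess(background=self.background,
        #       commands=post_phase_cmds, phase=self.phase, pkg=self.pkg,
        #       scheduler=self.scheduler, settings=settings)
        #   self._start_task(post_phase, self._post_phase_exit)
        #
        # where post_phase_cmds comes from portage._post_phase_cmds.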
2504         def _start(self):
2505                 settings = self.settings
2506                 settings.pop("EBUILD_PHASE", None)
2507                 portage_bin_path = settings["PORTAGE_BIN_PATH"]
2508                 misc_sh_binary = os.path.join(portage_bin_path,
2509                         os.path.basename(portage.const.MISC_SH_BINARY))
2510
2511                 self.args = [portage._shell_quote(misc_sh_binary)] + self.commands
2512                 self.logfile = settings.get("PORTAGE_LOG_FILE")
2513
2514                 portage._doebuild_exit_status_unlink(
2515                         settings.get("EBUILD_EXIT_STATUS_FILE"))
2516
2517                 SpawnProcess._start(self)
2518
2519         def _spawn(self, args, **kwargs):
2520                 settings = self.settings
2521                 debug = settings.get("PORTAGE_DEBUG") == "1"
2522                 return portage.spawn(" ".join(args), settings,
2523                         debug=debug, **kwargs)
2524
2525         def _set_returncode(self, wait_retval):
2526                 SpawnProcess._set_returncode(self, wait_retval)
2527                 self.returncode = portage._doebuild_exit_status_check_and_log(
2528                         self.settings, self.phase, self.returncode)
2529
2530 class EbuildFetcher(SpawnProcess):
2531
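        # Fetches an ebuild's distfiles in a child process by running the
        # ebuild(1) helper with the "fetch" or "fetchall" phase, optionally in
        # prefetch mode where logging goes to emerge-fetch.log.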
2532         __slots__ = ("config_pool", "fetchonly", "fetchall", "pkg", "prefetch") + \
2533                 ("_build_dir",)
2534
2535         def _start(self):
2536
2537                 root_config = self.pkg.root_config
2538                 portdb = root_config.trees["porttree"].dbapi
2539                 ebuild_path = portdb.findname(self.pkg.cpv)
2540                 settings = self.config_pool.allocate()
2541                 settings.setcpv(self.pkg)
2542
2543                 # In prefetch mode, logging goes to emerge-fetch.log and the builddir
2544                 # should not be touched since otherwise it could interfere with
2545                 # another instance of the same cpv concurrently being built for a
2546                 # different $ROOT (currently, builds only cooperate with prefetchers
2547                 # that are spawned for the same $ROOT).
2548                 if not self.prefetch:
2549                         self._build_dir = EbuildBuildDir(pkg=self.pkg, settings=settings)
2550                         self._build_dir.lock()
2551                         self._build_dir.clean_log()
2552                         portage.prepare_build_dirs(self.pkg.root, self._build_dir.settings, 0)
2553                         if self.logfile is None:
2554                                 self.logfile = settings.get("PORTAGE_LOG_FILE")
2555
2556                 phase = "fetch"
2557                 if self.fetchall:
2558                         phase = "fetchall"
2559
2560                 # If any incremental variables have been overridden
2561                 # via the environment, those values need to be passed
2562                 # along here so that they are correctly considered by
2563                 # the config instance in the subprocess.
2564                 fetch_env = os.environ.copy()
2565
2566                 nocolor = settings.get("NOCOLOR")
2567                 if nocolor is not None:
2568                         fetch_env["NOCOLOR"] = nocolor
2569
2570                 fetch_env["PORTAGE_NICENESS"] = "0"
2571                 if self.prefetch:
2572                         fetch_env["PORTAGE_PARALLEL_FETCHONLY"] = "1"
2573
2574                 ebuild_binary = os.path.join(
2575                         settings["PORTAGE_BIN_PATH"], "ebuild")
2576
2577                 fetch_args = [ebuild_binary, ebuild_path, phase]
2578                 debug = settings.get("PORTAGE_DEBUG") == "1"
2579                 if debug:
2580                         fetch_args.append("--debug")
2581
2582                 self.args = fetch_args
2583                 self.env = fetch_env
2584                 SpawnProcess._start(self)
2585
2586         def _pipe(self, fd_pipes):
2587                 """When appropriate, use a pty so that fetcher progress bars,
2588                 like the ones wget displays, will work properly."""
2589                 if self.background or not sys.stdout.isatty():
2590                         # When the output only goes to a log file,
2591                         # there's no point in creating a pty.
2592                         return os.pipe()
2593                 stdout_pipe = fd_pipes.get(1)
2594                 got_pty, master_fd, slave_fd = \
2595                         portage._create_pty_or_pipe(copy_term_size=stdout_pipe)
2596                 return (master_fd, slave_fd)
2597
2598         def _set_returncode(self, wait_retval):
2599                 SpawnProcess._set_returncode(self, wait_retval)
2600                 # Collect elog messages that might have been
2601                 # created by the pkg_nofetch phase.
2602                 if self._build_dir is not None:
2603                         # Skip elog messages for prefetch, in order to avoid duplicates.
2604                         if not self.prefetch and self.returncode != os.EX_OK:
2605                                 elog_out = None
2606                                 if self.logfile is not None:
2607                                         if self.background:
2608                                                 elog_out = open(self.logfile, 'a')
2609                                 msg = "Fetch failed for '%s'" % (self.pkg.cpv,)
2610                                 if self.logfile is not None:
2611                                         msg += ", Log file:"
2612                                 eerror(msg, phase="unpack", key=self.pkg.cpv, out=elog_out)
2613                                 if self.logfile is not None:
2614                                         eerror(" '%s'" % (self.logfile,),
2615                                                 phase="unpack", key=self.pkg.cpv, out=elog_out)
2616                                 if elog_out is not None:
2617                                         elog_out.close()
2618                         if not self.prefetch:
2619                                 portage.elog.elog_process(self.pkg.cpv, self._build_dir.settings)
2620                         features = self._build_dir.settings.features
2621                         if self.returncode == os.EX_OK:
2622                                 self._build_dir.clean_log()
2623                         self._build_dir.unlock()
2624                         self.config_pool.deallocate(self._build_dir.settings)
2625                         self._build_dir = None
2626
2627 class EbuildBuildDir(SlotObject):
2628
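        # Manages the lock on a package's PORTAGE_BUILDDIR. dir_path may be
        # supplied directly (as Binpkg does) or derived on demand via
        # doebuild_environment() the first time lock() is called.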
2629         __slots__ = ("dir_path", "pkg", "settings",
2630                 "locked", "_catdir", "_lock_obj")
2631
2632         def __init__(self, **kwargs):
2633                 SlotObject.__init__(self, **kwargs)
2634                 self.locked = False
2635
2636         def lock(self):
2637                 """
2638                 This raises an AlreadyLocked exception if lock() is called
2639                 while a lock is already held. In order to avoid this, call
2640                 unlock() or check whether the "locked" attribute is True
2641                 or False before calling lock().
2642                 """
2643                 if self._lock_obj is not None:
2644                         raise self.AlreadyLocked((self._lock_obj,))
2645
2646                 dir_path = self.dir_path
2647                 if dir_path is None:
2648                         root_config = self.pkg.root_config
2649                         portdb = root_config.trees["porttree"].dbapi
2650                         ebuild_path = portdb.findname(self.pkg.cpv)
2651                         settings = self.settings
2652                         settings.setcpv(self.pkg)
2653                         debug = settings.get("PORTAGE_DEBUG") == "1"
2654                         use_cache = 1 # always true
2655                         portage.doebuild_environment(ebuild_path, "setup", root_config.root,
2656                                 self.settings, debug, use_cache, portdb)
2657                         dir_path = self.settings["PORTAGE_BUILDDIR"]
2658
2659                 catdir = os.path.dirname(dir_path)
2660                 self._catdir = catdir
2661
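                # Hold a lock on the category directory while it is created and
                # while the build directory lock is acquired, so that unlock()
                # in a concurrent process cannot rmdir the category directory
                # underneath us.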
2662                 portage.util.ensure_dirs(os.path.dirname(catdir),
2663                         gid=portage.portage_gid,
2664                         mode=070, mask=0)
2665                 catdir_lock = None
2666                 try:
2667                         catdir_lock = portage.locks.lockdir(catdir)
2668                         portage.util.ensure_dirs(catdir,
2669                                 gid=portage.portage_gid,
2670                                 mode=070, mask=0)
2671                         self._lock_obj = portage.locks.lockdir(dir_path)
2672                 finally:
2673                         self.locked = self._lock_obj is not None
2674                         if catdir_lock is not None:
2675                                 portage.locks.unlockdir(catdir_lock)
2676
2677         def clean_log(self):
2678                 """Discard existing log."""
2679                 settings = self.settings
2680
2681                 for x in ('.logid', 'temp/build.log'):
2682                         try:
2683                                 os.unlink(os.path.join(settings["PORTAGE_BUILDDIR"], x))
2684                         except OSError:
2685                                 pass
2686
2687         def unlock(self):
2688                 if self._lock_obj is None:
2689                         return
2690
2691                 portage.locks.unlockdir(self._lock_obj)
2692                 self._lock_obj = None
2693                 self.locked = False
2694
2695                 catdir = self._catdir
2696                 catdir_lock = None
2697                 try:
2698                         catdir_lock = portage.locks.lockdir(catdir)
2699                 finally:
2700                         if catdir_lock:
2701                                 try:
2702                                         os.rmdir(catdir)
2703                                 except OSError, e:
2704                                         if e.errno not in (errno.ENOENT,
2705                                                 errno.ENOTEMPTY, errno.EEXIST):
2706                                                 raise
2707                                         del e
2708                                 portage.locks.unlockdir(catdir_lock)
2709
2710         class AlreadyLocked(portage.exception.PortageException):
2711                 pass
2712
2713 class EbuildBuild(CompositeTask):
2714
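        # Composite task that builds a package from source: wait for any
        # prefetcher, run the fetch phase, execute the build via
        # EbuildExecuter, optionally create a binary package, and release the
        # builddir lock when install() (or a failure path) completes.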
2715         __slots__ = ("args_set", "config_pool", "find_blockers",
2716                 "ldpath_mtimes", "logger", "opts", "pkg", "pkg_count",
2717                 "prefetcher", "settings", "world_atom") + \
2718                 ("_build_dir", "_buildpkg", "_ebuild_path", "_issyspkg", "_tree")
2719
2720         def _start(self):
2721
2722                 logger = self.logger
2723                 opts = self.opts
2724                 pkg = self.pkg
2725                 settings = self.settings
2726                 world_atom = self.world_atom
2727                 root_config = pkg.root_config
2728                 tree = "porttree"
2729                 self._tree = tree
2730                 portdb = root_config.trees[tree].dbapi
2731                 settings.setcpv(pkg)
2732                 settings.configdict["pkg"]["EMERGE_FROM"] = pkg.type_name
2733                 ebuild_path = portdb.findname(self.pkg.cpv)
2734                 self._ebuild_path = ebuild_path
2735
2736                 prefetcher = self.prefetcher
2737                 if prefetcher is None:
2738                         pass
2739                 elif not prefetcher.isAlive():
2740                         prefetcher.cancel()
2741                 elif prefetcher.poll() is None:
2742
2743                         waiting_msg = "Fetching files " + \
2744                                 "in the background. " + \
2745                                 "To view fetch progress, run `tail -f " + \
2746                                 "/var/log/emerge-fetch.log` in another " + \
2747                                 "terminal."
2748                         msg_prefix = colorize("GOOD", " * ")
2749                         from textwrap import wrap
2750                         waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
2751                                 for line in wrap(waiting_msg, 65))
2752                         if not self.background:
2753                                 writemsg(waiting_msg, noiselevel=-1)
2754
2755                         self._current_task = prefetcher
2756                         prefetcher.addExitListener(self._prefetch_exit)
2757                         return
2758
2759                 self._prefetch_exit(prefetcher)
2760
2761         def _prefetch_exit(self, prefetcher):
2762
2763                 opts = self.opts
2764                 pkg = self.pkg
2765                 settings = self.settings
2766
2767                 if opts.fetchonly:
2768                         fetcher = EbuildFetchonly(
2769                                 fetch_all=opts.fetch_all_uri,
2770                                 pkg=pkg, pretend=opts.pretend,
2771                                 settings=settings)
2772                         retval = fetcher.execute()
2773                         self.returncode = retval
2774                         self.wait()
2775                         return
2776
2777                 fetcher = EbuildFetcher(config_pool=self.config_pool,
2778                         fetchall=opts.fetch_all_uri,
2779                         fetchonly=opts.fetchonly,
2780                         background=self.background,
2781                         pkg=pkg, scheduler=self.scheduler)
2782
2783                 self._start_task(fetcher, self._fetch_exit)
2784
2785         def _fetch_exit(self, fetcher):
2786                 opts = self.opts
2787                 pkg = self.pkg
2788
2789                 fetch_failed = False
2790                 if opts.fetchonly:
2791                         fetch_failed = self._final_exit(fetcher) != os.EX_OK
2792                 else:
2793                         fetch_failed = self._default_exit(fetcher) != os.EX_OK
2794
2795                 if fetch_failed and fetcher.logfile is not None and \
2796                         os.path.exists(fetcher.logfile):
2797                         self.settings["PORTAGE_LOG_FILE"] = fetcher.logfile
2798
2799                 if not fetch_failed and fetcher.logfile is not None:
2800                         # Fetch was successful, so remove the fetch log.
2801                         try:
2802                                 os.unlink(fetcher.logfile)
2803                         except OSError:
2804                                 pass
2805
2806                 if fetch_failed or opts.fetchonly:
2807                         self.wait()
2808                         return
2809
2810                 logger = self.logger
2811                 opts = self.opts
2812                 pkg_count = self.pkg_count
2813                 scheduler = self.scheduler
2814                 settings = self.settings
2815                 features = settings.features
2816                 ebuild_path = self._ebuild_path
2817                 system_set = pkg.root_config.sets["system"]
2818
2819                 self._build_dir = EbuildBuildDir(pkg=pkg, settings=settings)
2820                 self._build_dir.lock()
2821
2822                 # Cleaning is triggered before the setup
2823                 # phase, in portage.doebuild().
2824                 msg = " === (%s of %s) Cleaning (%s::%s)" % \
2825                         (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2826                 short_msg = "emerge: (%s of %s) %s Clean" % \
2827                         (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2828                 logger.log(msg, short_msg=short_msg)
2829
2830                 # buildsyspkg: Check if we need to _force_ binary package creation
2831                 self._issyspkg = "buildsyspkg" in features and \
2832                                 system_set.findAtomForPackage(pkg) and \
2833                                 not opts.buildpkg
2834
2835                 if opts.buildpkg or self._issyspkg:
2836
2837                         self._buildpkg = True
2838
2839                         msg = " === (%s of %s) Compiling/Packaging (%s::%s)" % \
2840                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2841                         short_msg = "emerge: (%s of %s) %s Compile" % \
2842                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2843                         logger.log(msg, short_msg=short_msg)
2844
2845                 else:
2846                         msg = " === (%s of %s) Compiling/Merging (%s::%s)" % \
2847                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2848                         short_msg = "emerge: (%s of %s) %s Compile" % \
2849                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2850                         logger.log(msg, short_msg=short_msg)
2851
2852                 build = EbuildExecuter(background=self.background, pkg=pkg,
2853                         scheduler=scheduler, settings=settings)
2854                 self._start_task(build, self._build_exit)
2855
2856         def _unlock_builddir(self):
2857                 portage.elog.elog_process(self.pkg.cpv, self.settings)
2858                 self._build_dir.unlock()
2859
2860         def _build_exit(self, build):
2861                 if self._default_exit(build) != os.EX_OK:
2862                         self._unlock_builddir()
2863                         self.wait()
2864                         return
2865
2866                 opts = self.opts
2867                 buildpkg = self._buildpkg
2868
2869                 if not buildpkg:
2870                         self._final_exit(build)
2871                         self.wait()
2872                         return
2873
2874                 if self._issyspkg:
2875                         msg = ">>> This is a system package, " + \
2876                                 "let's pack a rescue tarball.\n"
2877
2878                         log_path = self.settings.get("PORTAGE_LOG_FILE")
2879                         if log_path is not None:
2880                                 log_file = open(log_path, 'a')
2881                                 try:
2882                                         log_file.write(msg)
2883                                 finally:
2884                                         log_file.close()
2885
2886                         if not self.background:
2887                                 portage.writemsg_stdout(msg, noiselevel=-1)
2888
2889                 packager = EbuildBinpkg(background=self.background, pkg=self.pkg,
2890                         scheduler=self.scheduler, settings=self.settings)
2891
2892                 self._start_task(packager, self._buildpkg_exit)
2893
2894         def _buildpkg_exit(self, packager):
2895                 """
2896                 Release the build dir lock when there is a failure or
2897                 when in buildpkgonly mode. Otherwise, the lock will
2898                 be released when merge() is called.
2899                 """
2900
2901                 if self._default_exit(packager) != os.EX_OK:
2902                         self._unlock_builddir()
2903                         self.wait()
2904                         return
2905
2906                 if self.opts.buildpkgonly:
2907                         # Need to call "clean" phase for buildpkgonly mode
2908                         portage.elog.elog_process(self.pkg.cpv, self.settings)
2909                         phase = "clean"
2910                         clean_phase = EbuildPhase(background=self.background,
2911                                 pkg=self.pkg, phase=phase,
2912                                 scheduler=self.scheduler, settings=self.settings,
2913                                 tree=self._tree)
2914                         self._start_task(clean_phase, self._clean_exit)
2915                         return
2916
2917                 # Continue holding the builddir lock until
2918                 # after the package has been installed.
2919                 self._current_task = None
2920                 self.returncode = packager.returncode
2921                 self.wait()
2922
2923         def _clean_exit(self, clean_phase):
2924                 if self._final_exit(clean_phase) != os.EX_OK or \
2925                         self.opts.buildpkgonly:
2926                         self._unlock_builddir()
2927                 self.wait()
2928
2929         def install(self):
2930                 """
2931                 Install the package and then clean up and release locks.
2932                 Only call this after the build has completed successfully
2933                 and neither fetchonly nor buildpkgonly mode is enabled.
2934                 """
2935
2936                 find_blockers = self.find_blockers
2937                 ldpath_mtimes = self.ldpath_mtimes
2938                 logger = self.logger
2939                 pkg = self.pkg
2940                 pkg_count = self.pkg_count
2941                 settings = self.settings
2942                 world_atom = self.world_atom
2943                 ebuild_path = self._ebuild_path
2944                 tree = self._tree
2945
2946                 merge = EbuildMerge(find_blockers=self.find_blockers,
2947                         ldpath_mtimes=ldpath_mtimes, logger=logger, pkg=pkg,
2948                         pkg_count=pkg_count, pkg_path=ebuild_path,
2949                         scheduler=self.scheduler,
2950                         settings=settings, tree=tree, world_atom=world_atom)
2951
2952                 msg = " === (%s of %s) Merging (%s::%s)" % \
2953                         (pkg_count.curval, pkg_count.maxval,
2954                         pkg.cpv, ebuild_path)
2955                 short_msg = "emerge: (%s of %s) %s Merge" % \
2956                         (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2957                 logger.log(msg, short_msg=short_msg)
2958
2959                 try:
2960                         rval = merge.execute()
2961                 finally:
2962                         self._unlock_builddir()
2963
2964                 return rval
2965
2966 class EbuildExecuter(CompositeTask):
2967
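        # Runs the clean and setup phases, unpacks the sources (serializing
        # $DISTDIR access for live ebuilds), and then executes the remaining
        # src_* phases from _phases as a TaskSequence.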
2968         __slots__ = ("pkg", "scheduler", "settings") + ("_tree",)
2969
2970         _phases = ("prepare", "configure", "compile", "test", "install")
2971
2972         _live_eclasses = frozenset([
2973                 "bzr",
2974                 "cvs",
2975                 "darcs",
2976                 "git",
2977                 "mercurial",
2978                 "subversion"
2979         ])
2980
2981         def _start(self):
2982                 self._tree = "porttree"
2983                 pkg = self.pkg
2984                 phase = "clean"
2985                 clean_phase = EbuildPhase(background=self.background, pkg=pkg, phase=phase,
2986                         scheduler=self.scheduler, settings=self.settings, tree=self._tree)
2987                 self._start_task(clean_phase, self._clean_phase_exit)
2988
2989         def _clean_phase_exit(self, clean_phase):
2990
2991                 if self._default_exit(clean_phase) != os.EX_OK:
2992                         self.wait()
2993                         return
2994
2995                 pkg = self.pkg
2996                 scheduler = self.scheduler
2997                 settings = self.settings
2998                 cleanup = 1
2999
3000                 # This initializes PORTAGE_LOG_FILE.
3001                 portage.prepare_build_dirs(pkg.root, settings, cleanup)
3002
3003                 setup_phase = EbuildPhase(background=self.background,
3004                         pkg=pkg, phase="setup", scheduler=scheduler,
3005                         settings=settings, tree=self._tree)
3006
3007                 setup_phase.addExitListener(self._setup_exit)
3008                 self._current_task = setup_phase
3009                 self.scheduler.scheduleSetup(setup_phase)
3010
3011         def _setup_exit(self, setup_phase):
3012
3013                 if self._default_exit(setup_phase) != os.EX_OK:
3014                         self.wait()
3015                         return
3016
3017                 unpack_phase = EbuildPhase(background=self.background,
3018                         pkg=self.pkg, phase="unpack", scheduler=self.scheduler,
3019                         settings=self.settings, tree=self._tree)
3020
3021                 if self._live_eclasses.intersection(self.pkg.inherited):
3022                         # Serialize $DISTDIR access for live ebuilds since
3023                         # otherwise they can interfere with each other.
3024
3025                         unpack_phase.addExitListener(self._unpack_exit)
3026                         self._current_task = unpack_phase
3027                         self.scheduler.scheduleUnpack(unpack_phase)
3028
3029                 else:
3030                         self._start_task(unpack_phase, self._unpack_exit)
3031
3032         def _unpack_exit(self, unpack_phase):
3033
3034                 if self._default_exit(unpack_phase) != os.EX_OK:
3035                         self.wait()
3036                         return
3037
3038                 ebuild_phases = TaskSequence(scheduler=self.scheduler)
3039
3040                 pkg = self.pkg
3041                 phases = self._phases
3042                 eapi = pkg.metadata["EAPI"]
3043                 if eapi in ("0", "1"):
3044                         # skip src_prepare and src_configure
3045                         phases = phases[2:]
3046
3047                 for phase in phases:
3048                         ebuild_phases.add(EbuildPhase(background=self.background,
3049                                 pkg=self.pkg, phase=phase, scheduler=self.scheduler,
3050                                 settings=self.settings, tree=self._tree))
3051
3052                 self._start_task(ebuild_phases, self._default_final_exit)
3053
3054 class EbuildMetadataPhase(SubProcess):
3055
3056         """
3057         Asynchronous interface for the ebuild "depend" phase which is
3058         used to extract metadata from the ebuild.
3059         """
3060
3061         __slots__ = ("cpv", "ebuild_path", "fd_pipes", "metadata_callback",
3062                 "ebuild_mtime", "metadata", "portdb", "repo_path", "settings") + \
3063                 ("_raw_metadata",)
3064
3065         _file_names = ("ebuild",)
3066         _files_dict = slot_dict_class(_file_names, prefix="")
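        # File descriptor on which the child "depend" phase is expected to
        # write the metadata, one line per key in portage.auxdbkeys; the lines
        # are paired back up with the keys in _set_returncode().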
3067         _metadata_fd = 9
3068
3069         def _start(self):
3070                 settings = self.settings
3071                 settings.setcpv(self.cpv)
3072                 ebuild_path = self.ebuild_path
3073
3074                 eapi = None
3075                 if 'parse-eapi-glep-55' in settings.features:
3076                         pf, eapi = portage._split_ebuild_name_glep55(
3077                                 os.path.basename(ebuild_path))
3078                 if eapi is None and \
3079                         'parse-eapi-ebuild-head' in settings.features:
3080                         eapi = portage._parse_eapi_ebuild_head(codecs.open(ebuild_path,
3081                                 mode='r', encoding='utf_8', errors='replace'))
3082
3083                 if eapi is not None:
3084                         if not portage.eapi_is_supported(eapi):
3085                                 self.metadata_callback(self.cpv, self.ebuild_path,
3086                                         self.repo_path, {'EAPI' : eapi}, self.ebuild_mtime)
3087                                 self.returncode = os.EX_OK
3088                                 self.wait()
3089                                 return
3090
3091                         settings.configdict['pkg']['EAPI'] = eapi
3092
3093                 debug = settings.get("PORTAGE_DEBUG") == "1"
3094                 master_fd = None
3095                 slave_fd = None
3096                 fd_pipes = None
3097                 if self.fd_pipes is not None:
3098                         fd_pipes = self.fd_pipes.copy()
3099                 else:
3100                         fd_pipes = {}
3101
3102                 fd_pipes.setdefault(0, sys.stdin.fileno())
3103                 fd_pipes.setdefault(1, sys.stdout.fileno())
3104                 fd_pipes.setdefault(2, sys.stderr.fileno())
3105
3106                 # flush any pending output
3107                 for fd in fd_pipes.itervalues():
3108                         if fd == sys.stdout.fileno():
3109                                 sys.stdout.flush()
3110                         if fd == sys.stderr.fileno():
3111                                 sys.stderr.flush()
3112
3113                 fd_pipes_orig = fd_pipes.copy()
3114                 self._files = self._files_dict()
3115                 files = self._files
3116
3117                 master_fd, slave_fd = os.pipe()
3118                 fcntl.fcntl(master_fd, fcntl.F_SETFL,
3119                         fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
3120
3121                 fd_pipes[self._metadata_fd] = slave_fd
3122
3123                 self._raw_metadata = []
3124                 files.ebuild = os.fdopen(master_fd, 'r')
3125                 self._reg_id = self.scheduler.register(files.ebuild.fileno(),
3126                         self._registered_events, self._output_handler)
3127                 self._registered = True
3128
3129                 retval = portage.doebuild(ebuild_path, "depend",
3130                         settings["ROOT"], settings, debug,
3131                         mydbapi=self.portdb, tree="porttree",
3132                         fd_pipes=fd_pipes, returnpid=True)
3133
3134                 os.close(slave_fd)
3135
3136                 if isinstance(retval, int):
3137                         # doebuild failed before spawning
3138                         self._unregister()
3139                         self.returncode = retval
3140                         self.wait()
3141                         return
3142
3143                 self.pid = retval[0]
3144                 portage.process.spawned_pids.remove(self.pid)
3145
3146         def _output_handler(self, fd, event):
3147
3148                 if event & PollConstants.POLLIN:
3149                         self._raw_metadata.append(self._files.ebuild.read())
3150                         if not self._raw_metadata[-1]:
3151                                 self._unregister()
3152                                 self.wait()
3153
3154                 self._unregister_if_appropriate(event)
3155                 return self._registered
3156
3157         def _set_returncode(self, wait_retval):
3158                 SubProcess._set_returncode(self, wait_retval)
3159                 if self.returncode == os.EX_OK:
3160                         metadata_lines = "".join(self._raw_metadata).splitlines()
3161                         if len(portage.auxdbkeys) != len(metadata_lines):
3162                                 # Don't trust bash's returncode if the
3163                                 # number of lines is incorrect.
3164                                 self.returncode = 1
3165                         else:
3166                                 metadata = izip(portage.auxdbkeys, metadata_lines)
3167                                 self.metadata = self.metadata_callback(self.cpv,
3168                                         self.ebuild_path, self.repo_path, metadata,
3169                                         self.ebuild_mtime)
3170
3171 class EbuildProcess(SpawnProcess):
3172
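        # Thin SpawnProcess wrapper that runs a single ebuild phase through
        # portage.doebuild() in _spawn().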
3173         __slots__ = ("phase", "pkg", "settings", "tree")
3174
3175         def _start(self):
3176                 # Don't open the log file during the clean phase since the
3177                 # open file can result in an NFS lock on $T/build.log which
3178                 # prevents the clean phase from removing $T.
3179                 if self.phase not in ("clean", "cleanrm"):
3180                         self.logfile = self.settings.get("PORTAGE_LOG_FILE")
3181                 SpawnProcess._start(self)
3182
3183         def _pipe(self, fd_pipes):
3184                 stdout_pipe = fd_pipes.get(1)
3185                 got_pty, master_fd, slave_fd = \
3186                         portage._create_pty_or_pipe(copy_term_size=stdout_pipe)
3187                 return (master_fd, slave_fd)
3188
3189         def _spawn(self, args, **kwargs):
3190
3191                 root_config = self.pkg.root_config
3192                 tree = self.tree
3193                 mydbapi = root_config.trees[tree].dbapi
3194                 settings = self.settings
3195                 ebuild_path = settings["EBUILD"]
3196                 debug = settings.get("PORTAGE_DEBUG") == "1"
3197
3198                 rval = portage.doebuild(ebuild_path, self.phase,
3199                         root_config.root, settings, debug,
3200                         mydbapi=mydbapi, tree=tree, **kwargs)
3201
3202                 return rval
3203
3204         def _set_returncode(self, wait_retval):
3205                 SpawnProcess._set_returncode(self, wait_retval)
3206
3207                 if self.phase not in ("clean", "cleanrm"):
3208                         self.returncode = portage._doebuild_exit_status_check_and_log(
3209                                 self.settings, self.phase, self.returncode)
3210
3211                 if self.phase == "test" and self.returncode != os.EX_OK and \
3212                         "test-fail-continue" in self.settings.features:
3213                         self.returncode = os.EX_OK
3214
3215                 portage._post_phase_userpriv_perms(self.settings)
3216
3217 class EbuildPhase(CompositeTask):
3218
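        # Wraps an EbuildProcess for one phase and, when the phase has entries
        # in portage._post_phase_cmds, follows it with a MiscFunctionsProcess.
        # For the install phase it also checks the build log and applies the
        # post-install CHOST and ownership fixups.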
3219         __slots__ = ("background", "pkg", "phase",
3220                 "scheduler", "settings", "tree")
3221
3222         _post_phase_cmds = portage._post_phase_cmds
3223
3224         def _start(self):
3225
3226                 ebuild_process = EbuildProcess(background=self.background,
3227                         pkg=self.pkg, phase=self.phase, scheduler=self.scheduler,
3228                         settings=self.settings, tree=self.tree)
3229
3230                 self._start_task(ebuild_process, self._ebuild_exit)
3231
3232         def _ebuild_exit(self, ebuild_process):
3233
3234                 if self.phase == "install":
3235                         out = None
3236                         log_path = self.settings.get("PORTAGE_LOG_FILE")
3237                         log_file = None
3238                         if self.background and log_path is not None:
3239                                 log_file = open(log_path, 'a')
3240                                 out = log_file
3241                         try:
3242                                 portage._check_build_log(self.settings, out=out)
3243                         finally:
3244                                 if log_file is not None:
3245                                         log_file.close()
3246
3247                 if self._default_exit(ebuild_process) != os.EX_OK:
3248                         self.wait()
3249                         return
3250
3251                 settings = self.settings
3252
3253                 if self.phase == "install":
3254                         portage._post_src_install_chost_fix(settings)
3255                         portage._post_src_install_uid_fix(settings)
3256
3257                 post_phase_cmds = self._post_phase_cmds.get(self.phase)
3258                 if post_phase_cmds is not None:
3259                         post_phase = MiscFunctionsProcess(background=self.background,
3260                                 commands=post_phase_cmds, phase=self.phase, pkg=self.pkg,
3261                                 scheduler=self.scheduler, settings=settings)
3262                         self._start_task(post_phase, self._post_phase_exit)
3263                         return
3264
3265                 self.returncode = ebuild_process.returncode
3266                 self._current_task = None
3267                 self.wait()
3268
3269         def _post_phase_exit(self, post_phase):
3270                 if self._final_exit(post_phase) != os.EX_OK:
3271                         writemsg("!!! post %s failed; exiting.\n" % self.phase,
3272                                 noiselevel=-1)
3273                 self._current_task = None
3274                 self.wait()
3275                 return
3276
3277 class EbuildBinpkg(EbuildProcess):
3278         """
3279         This assumes that src_install() has successfully completed.
3280         """
3281         __slots__ = ("_binpkg_tmpfile",)
3282
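        # The "package" phase writes the archive to a temporary file under
        # PKGDIR; _set_returncode() injects it into the binary tree only if
        # the phase succeeded.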
3283         def _start(self):
3284                 self.phase = "package"
3285                 self.tree = "porttree"
3286                 pkg = self.pkg
3287                 root_config = pkg.root_config
3288                 portdb = root_config.trees["porttree"].dbapi
3289                 bintree = root_config.trees["bintree"]
3290                 ebuild_path = portdb.findname(self.pkg.cpv)
3291                 settings = self.settings
3292                 debug = settings.get("PORTAGE_DEBUG") == "1"
3293
3294                 bintree.prevent_collision(pkg.cpv)
3295                 binpkg_tmpfile = os.path.join(bintree.pkgdir,
3296                         pkg.cpv + ".tbz2." + str(os.getpid()))
3297                 self._binpkg_tmpfile = binpkg_tmpfile
3298                 settings["PORTAGE_BINPKG_TMPFILE"] = binpkg_tmpfile
3299                 settings.backup_changes("PORTAGE_BINPKG_TMPFILE")
3300
3301                 try:
3302                         EbuildProcess._start(self)
3303                 finally:
3304                         settings.pop("PORTAGE_BINPKG_TMPFILE", None)
3305
3306         def _set_returncode(self, wait_retval):
3307                 EbuildProcess._set_returncode(self, wait_retval)
3308
3309                 pkg = self.pkg
3310                 bintree = pkg.root_config.trees["bintree"]
3311                 binpkg_tmpfile = self._binpkg_tmpfile
3312                 if self.returncode == os.EX_OK:
3313                         bintree.inject(pkg.cpv, filename=binpkg_tmpfile)
3314
3315 class EbuildMerge(SlotObject):
3316
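        # Synchronous helper whose execute() method calls portage.merge() for
        # the installed image and, on success, invokes the world_atom callback
        # and logs completion.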
3317         __slots__ = ("find_blockers", "logger", "ldpath_mtimes",
3318                 "pkg", "pkg_count", "pkg_path", "pretend",
3319                 "scheduler", "settings", "tree", "world_atom")
3320
3321         def execute(self):
3322                 root_config = self.pkg.root_config
3323                 settings = self.settings
3324                 retval = portage.merge(settings["CATEGORY"],
3325                         settings["PF"], settings["D"],
3326                         os.path.join(settings["PORTAGE_BUILDDIR"],
3327                         "build-info"), root_config.root, settings,
3328                         myebuild=settings["EBUILD"],
3329                         mytree=self.tree, mydbapi=root_config.trees[self.tree].dbapi,
3330                         vartree=root_config.trees["vartree"],
3331                         prev_mtimes=self.ldpath_mtimes,
3332                         scheduler=self.scheduler,
3333                         blockers=self.find_blockers)
3334
3335                 if retval == os.EX_OK:
3336                         self.world_atom(self.pkg)
3337                         self._log_success()
3338
3339                 return retval
3340
3341         def _log_success(self):
3342                 pkg = self.pkg
3343                 pkg_count = self.pkg_count
3344                 pkg_path = self.pkg_path
3345                 logger = self.logger
3346                 if "noclean" not in self.settings.features:
3347                         short_msg = "emerge: (%s of %s) %s Clean Post" % \
3348                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3349                         logger.log((" === (%s of %s) " + \
3350                                 "Post-Build Cleaning (%s::%s)") % \
3351                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path),
3352                                 short_msg=short_msg)
3353                 logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
3354                         (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
3355
3356 class PackageUninstall(AsynchronousTask):
3357
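        # Runs unmerge() for a single installed package, converting an
        # UninstallFailure into this task's returncode and routing messages to
        # the terminal and/or PORTAGE_LOG_FILE via _writemsg_level().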
3358         __slots__ = ("ldpath_mtimes", "opts", "pkg", "scheduler", "settings")
3359
3360         def _start(self):
3361                 try:
3362                         unmerge(self.pkg.root_config, self.opts, "unmerge",
3363                                 [self.pkg.cpv], self.ldpath_mtimes, clean_world=0,
3364                                 clean_delay=0, raise_on_error=1, scheduler=self.scheduler,
3365                                 writemsg_level=self._writemsg_level)
3366                 except UninstallFailure, e:
3367                         self.returncode = e.status
3368                 else:
3369                         self.returncode = os.EX_OK
3370                 self.wait()
3371
3372         def _writemsg_level(self, msg, level=0, noiselevel=0):
3373
3374                 log_path = self.settings.get("PORTAGE_LOG_FILE")
3375                 background = self.background
3376
3377                 if log_path is None:
3378                         if not (background and level < logging.WARNING):
3379                                 portage.util.writemsg_level(msg,
3380                                         level=level, noiselevel=noiselevel)
3381                 else:
3382                         if not background:
3383                                 portage.util.writemsg_level(msg,
3384                                         level=level, noiselevel=noiselevel)
3385
3386                         f = open(log_path, 'a')
3387                         try:
3388                                 f.write(msg)
3389                         finally:
3390                                 f.close()
3391
3392 class Binpkg(CompositeTask):
3393
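        # Composite task that installs a binary package: wait for any
        # prefetcher, fetch the .tbz2 if necessary, verify it, run the clean
        # and setup phases, unpack build-info and the image, and finally merge
        # via install().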
3394         __slots__ = ("find_blockers",
3395                 "ldpath_mtimes", "logger", "opts",
3396                 "pkg", "pkg_count", "prefetcher", "settings", "world_atom") + \
3397                 ("_bintree", "_build_dir", "_ebuild_path", "_fetched_pkg",
3398                 "_image_dir", "_infloc", "_pkg_path", "_tree", "_verify")
3399
3400         def _writemsg_level(self, msg, level=0, noiselevel=0):
3401
3402                 if not self.background:
3403                         portage.util.writemsg_level(msg,
3404                                 level=level, noiselevel=noiselevel)
3405
3406                 log_path = self.settings.get("PORTAGE_LOG_FILE")
3407                 if log_path is not None:
3408                         f = open(log_path, 'a')
3409                         try:
3410                                 f.write(msg)
3411                         finally:
3412                                 f.close()
3413
3414         def _start(self):
3415
3416                 pkg = self.pkg
3417                 settings = self.settings
3418                 settings.setcpv(pkg)
3419                 self._tree = "bintree"
3420                 self._bintree = self.pkg.root_config.trees[self._tree]
3421                 self._verify = not self.opts.pretend
3422
3423                 dir_path = os.path.join(settings["PORTAGE_TMPDIR"],
3424                         "portage", pkg.category, pkg.pf)
3425                 self._build_dir = EbuildBuildDir(dir_path=dir_path,
3426                         pkg=pkg, settings=settings)
3427                 self._image_dir = os.path.join(dir_path, "image")
3428                 self._infloc = os.path.join(dir_path, "build-info")
3429                 self._ebuild_path = os.path.join(self._infloc, pkg.pf + ".ebuild")
3430                 settings["EBUILD"] = self._ebuild_path
3431                 debug = settings.get("PORTAGE_DEBUG") == "1"
3432                 portage.doebuild_environment(self._ebuild_path, "setup",
3433                         settings["ROOT"], settings, debug, 1, self._bintree.dbapi)
3434                 settings.configdict["pkg"]["EMERGE_FROM"] = pkg.type_name
3435
3436                 # The prefetcher has already completed or it
3437                 # could be running now. If it's running now,
3438                 # wait for it to complete since it holds
3439                 # a lock on the file being fetched. The
3440                 # portage.locks functions are only designed
3441                 # to work between separate processes. Since
3442                 # the lock is held by the current process,
3443                 # use the scheduler and fetcher methods to
3444                 # synchronize with the fetcher.
3445                 prefetcher = self.prefetcher
3446                 if prefetcher is None:
3447                         pass
3448                 elif not prefetcher.isAlive():
3449                         prefetcher.cancel()
3450                 elif prefetcher.poll() is None:
3451
3452                         waiting_msg = ("Fetching '%s' " + \
3453                                 "in the background. " + \
3454                                 "To view fetch progress, run `tail -f " + \
3455                                 "/var/log/emerge-fetch.log` in another " + \
3456                                 "terminal.") % prefetcher.pkg_path
3457                         msg_prefix = colorize("GOOD", " * ")
3458                         from textwrap import wrap
3459                         waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
3460                                 for line in wrap(waiting_msg, 65))
3461                         if not self.background:
3462                                 writemsg(waiting_msg, noiselevel=-1)
3463
3464                         self._current_task = prefetcher
3465                         prefetcher.addExitListener(self._prefetch_exit)
3466                         return
3467
3468                 self._prefetch_exit(prefetcher)
3469
3470         def _prefetch_exit(self, prefetcher):
3471
3472                 pkg = self.pkg
3473                 pkg_count = self.pkg_count
3474                 if not (self.opts.pretend or self.opts.fetchonly):
3475                         self._build_dir.lock()
3476                         # If necessary, discard old log so that we don't
3477                         # append to it.
3478                         self._build_dir.clean_log()
3479                         # Initialize PORTAGE_LOG_FILE.
3480                         portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
3481                 fetcher = BinpkgFetcher(background=self.background,
3482                         logfile=self.settings.get("PORTAGE_LOG_FILE"), pkg=self.pkg,
3483                         pretend=self.opts.pretend, scheduler=self.scheduler)
3484                 pkg_path = fetcher.pkg_path
3485                 self._pkg_path = pkg_path
3486
3487                 if self.opts.getbinpkg and self._bintree.isremote(pkg.cpv):
3488
3489                         msg = " --- (%s of %s) Fetching Binary (%s::%s)" %\
3490                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
3491                         short_msg = "emerge: (%s of %s) %s Fetch" % \
3492                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3493                         self.logger.log(msg, short_msg=short_msg)
3494                         self._start_task(fetcher, self._fetcher_exit)
3495                         return
3496
3497                 self._fetcher_exit(fetcher)
3498
3499         def _fetcher_exit(self, fetcher):
3500
3501                 # The fetcher only has a returncode when
3502                 # --getbinpkg is enabled.
3503                 if fetcher.returncode is not None:
3504                         self._fetched_pkg = True
3505                         if self._default_exit(fetcher) != os.EX_OK:
3506                                 self._unlock_builddir()
3507                                 self.wait()
3508                                 return
3509
3510                 if self.opts.pretend:
3511                         self._current_task = None
3512                         self.returncode = os.EX_OK
3513                         self.wait()
3514                         return
3515
3516                 verifier = None
3517                 if self._verify:
3518                         logfile = None
3519                         if self.background:
3520                                 logfile = self.settings.get("PORTAGE_LOG_FILE")
3521                         verifier = BinpkgVerifier(background=self.background,
3522                                 logfile=logfile, pkg=self.pkg)
3523                         self._start_task(verifier, self._verifier_exit)
3524                         return
3525
3526                 self._verifier_exit(verifier)
3527
3528         def _verifier_exit(self, verifier):
3529                 if verifier is not None and \
3530                         self._default_exit(verifier) != os.EX_OK:
3531                         self._unlock_builddir()
3532                         self.wait()
3533                         return
3534
3535                 logger = self.logger
3536                 pkg = self.pkg
3537                 pkg_count = self.pkg_count
3538                 pkg_path = self._pkg_path
3539
3540                 if self._fetched_pkg:
3541                         self._bintree.inject(pkg.cpv, filename=pkg_path)
3542
3543                 if self.opts.fetchonly:
3544                         self._current_task = None
3545                         self.returncode = os.EX_OK
3546                         self.wait()
3547                         return
3548
3549                 msg = " === (%s of %s) Merging Binary (%s::%s)" % \
3550                         (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
3551                 short_msg = "emerge: (%s of %s) %s Merge Binary" % \
3552                         (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3553                 logger.log(msg, short_msg=short_msg)
3554
3555                 phase = "clean"
3556                 settings = self.settings
3557                 ebuild_phase = EbuildPhase(background=self.background,
3558                         pkg=pkg, phase=phase, scheduler=self.scheduler,
3559                         settings=settings, tree=self._tree)
3560
3561                 self._start_task(ebuild_phase, self._clean_exit)
3562
3563         def _clean_exit(self, clean_phase):
3564                 if self._default_exit(clean_phase) != os.EX_OK:
3565                         self._unlock_builddir()
3566                         self.wait()
3567                         return
3568
3569                 dir_path = self._build_dir.dir_path
3570
3571                 infloc = self._infloc
3572                 pkg = self.pkg
3573                 pkg_path = self._pkg_path
3574
3575                 dir_mode = 0755
3576                 for mydir in (dir_path, self._image_dir, infloc):
3577                         portage.util.ensure_dirs(mydir, uid=portage.data.portage_uid,
3578                                 gid=portage.data.portage_gid, mode=dir_mode)
3579
3580                 # This initializes PORTAGE_LOG_FILE.
3581                 portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
3582                 self._writemsg_level(">>> Extracting info\n")
3583
3584                 pkg_xpak = portage.xpak.tbz2(self._pkg_path)
3585                 check_missing_metadata = ("CATEGORY", "PF")
3586                 missing_metadata = set()
3587                 for k in check_missing_metadata:
3588                         v = pkg_xpak.getfile(k)
3589                         if not v:
3590                                 missing_metadata.add(k)
3591
3592                 pkg_xpak.unpackinfo(infloc)
3593                 for k in missing_metadata:
3594                         if k == "CATEGORY":
3595                                 v = pkg.category
3596                         elif k == "PF":
3597                                 v = pkg.pf
3598                         else:
3599                                 continue
3600
3601                         f = open(os.path.join(infloc, k), 'wb')
3602                         try:
3603                                 f.write(v + "\n")
3604                         finally:
3605                                 f.close()
3606
3607                 # Store the md5sum in the vdb.
3608                 f = open(os.path.join(infloc, "BINPKGMD5"), "w")
3609                 try:
3610                         f.write(str(portage.checksum.perform_md5(pkg_path)) + "\n")
3611                 finally:
3612                         f.close()
3613
3614                 # This gives bashrc users an opportunity to do various things
3615                 # such as remove binary packages after they're installed.
3616                 settings = self.settings
3617                 settings.setcpv(self.pkg)
3618                 settings["PORTAGE_BINPKG_FILE"] = pkg_path
3619                 settings.backup_changes("PORTAGE_BINPKG_FILE")
3620
3621                 phase = "setup"
3622                 setup_phase = EbuildPhase(background=self.background,
3623                         pkg=self.pkg, phase=phase, scheduler=self.scheduler,
3624                         settings=settings, tree=self._tree)
3625
3626                 setup_phase.addExitListener(self._setup_exit)
3627                 self._current_task = setup_phase
3628                 self.scheduler.scheduleSetup(setup_phase)
3629
3630         def _setup_exit(self, setup_phase):
3631                 if self._default_exit(setup_phase) != os.EX_OK:
3632                         self._unlock_builddir()
3633                         self.wait()
3634                         return
3635
3636                 extractor = BinpkgExtractorAsync(background=self.background,
3637                         image_dir=self._image_dir,
3638                         pkg=self.pkg, pkg_path=self._pkg_path, scheduler=self.scheduler)
3639                 self._writemsg_level(">>> Extracting %s\n" % self.pkg.cpv)
3640                 self._start_task(extractor, self._extractor_exit)
3641
3642         def _extractor_exit(self, extractor):
3643                 if self._final_exit(extractor) != os.EX_OK:
3644                         self._unlock_builddir()
3645                         writemsg("!!! Error Extracting '%s'\n" % self._pkg_path,
3646                                 noiselevel=-1)
3647                 self.wait()
3648
3649         def _unlock_builddir(self):
3650                 if self.opts.pretend or self.opts.fetchonly:
3651                         return
3652                 portage.elog.elog_process(self.pkg.cpv, self.settings)
3653                 self._build_dir.unlock()
3654
3655         def install(self):
3656
3657                 # This gives bashrc users an opportunity to do various things
3658                 # such as remove binary packages after they're installed.
3659                 settings = self.settings
3660                 settings["PORTAGE_BINPKG_FILE"] = self._pkg_path
3661                 settings.backup_changes("PORTAGE_BINPKG_FILE")
3662
3663                 merge = EbuildMerge(find_blockers=self.find_blockers,
3664                         ldpath_mtimes=self.ldpath_mtimes, logger=self.logger,
3665                         pkg=self.pkg, pkg_count=self.pkg_count,
3666                         pkg_path=self._pkg_path, scheduler=self.scheduler,
3667                         settings=settings, tree=self._tree, world_atom=self.world_atom)
3668
3669                 try:
3670                         retval = merge.execute()
3671                 finally:
3672                         settings.pop("PORTAGE_BINPKG_FILE", None)
3673                         self._unlock_builddir()
3674                 return retval
3675
3676 class BinpkgFetcher(SpawnProcess):
3677
3678         __slots__ = ("pkg", "pretend",
3679                 "locked", "pkg_path", "_lock_obj")
3680
3681         def __init__(self, **kwargs):
3682                 SpawnProcess.__init__(self, **kwargs)
3683                 pkg = self.pkg
3684                 self.pkg_path = pkg.root_config.trees["bintree"].getname(pkg.cpv)
3685
3686         def _start(self):
3687
3688                 if self.cancelled:
3689                         return
3690
3691                 pkg = self.pkg
3692                 pretend = self.pretend
3693                 bintree = pkg.root_config.trees["bintree"]
3694                 settings = bintree.settings
3695                 use_locks = "distlocks" in settings.features
3696                 pkg_path = self.pkg_path
3697
3698                 if not pretend:
3699                         portage.util.ensure_dirs(os.path.dirname(pkg_path))
3700                         if use_locks:
3701                                 self.lock()
3702                 exists = os.path.exists(pkg_path)
3703                 resume = exists and os.path.basename(pkg_path) in bintree.invalids
3704                 if not (pretend or resume):
3705                         # Remove existing file or broken symlink.
3706                         try:
3707                                 os.unlink(pkg_path)
3708                         except OSError:
3709                                 pass
3710
3711                 # urljoin doesn't work correctly with
3712                 # unrecognized protocols like sftp
3713                 if bintree._remote_has_index:
3714                         rel_uri = bintree._remotepkgs[pkg.cpv].get("PATH")
3715                         if not rel_uri:
3716                                 rel_uri = pkg.cpv + ".tbz2"
3717                         uri = bintree._remote_base_uri.rstrip("/") + \
3718                                 "/" + rel_uri.lstrip("/")
3719                 else:
3720                         uri = settings["PORTAGE_BINHOST"].rstrip("/") + \
3721                                 "/" + pkg.pf + ".tbz2"
3722
3723                 if pretend:
3724                         portage.writemsg_stdout("\n%s\n" % uri, noiselevel=-1)
3725                         self.returncode = os.EX_OK
3726                         self.wait()
3727                         return
3728
3729                 protocol = urlparse.urlparse(uri)[0]
3730                 fcmd_prefix = "FETCHCOMMAND"
3731                 if resume:
3732                         fcmd_prefix = "RESUMECOMMAND"
3733                 fcmd = settings.get(fcmd_prefix + "_" + protocol.upper())
3734                 if not fcmd:
3735                         fcmd = settings.get(fcmd_prefix)
3736
3737                 fcmd_vars = {
3738                         "DISTDIR" : os.path.dirname(pkg_path),
3739                         "URI"     : uri,
3740                         "FILE"    : os.path.basename(pkg_path)
3741                 }
3742
3743                 fetch_env = dict(settings.iteritems())
3744                 fetch_args = [portage.util.varexpand(x, mydict=fcmd_vars) \
3745                         for x in shlex.split(fcmd)]
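                     # Purely illustrative (hypothetical FETCHCOMMAND value, not taken
                     # from any particular configuration): with
                     #   FETCHCOMMAND='wget -t 3 -T 60 -O "${DISTDIR}/${FILE}" "${URI}"'
                     # the shlex.split() plus varexpand() calls above would yield an
                     # argv list such as
                     #   ['wget', '-t', '3', '-T', '60', '-O',
                     #    '/path/to/packages/All/foo-1.0.tbz2',
                     #    'http://binhost.example.org/All/foo-1.0.tbz2']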
3746
3747                 if self.fd_pipes is None:
3748                         self.fd_pipes = {}
3749                 fd_pipes = self.fd_pipes
3750
3751                 # Redirect all output to stdout since some fetchers like
3752                 # wget pollute stderr (if portage detects a problem then it
3753                 # can send its own message to stderr).
3754                 fd_pipes.setdefault(0, sys.stdin.fileno())
3755                 fd_pipes.setdefault(1, sys.stdout.fileno())
3756                 fd_pipes.setdefault(2, sys.stdout.fileno())
3757
3758                 self.args = fetch_args
3759                 self.env = fetch_env
3760                 SpawnProcess._start(self)
3761
3762         def _set_returncode(self, wait_retval):
3763                 SpawnProcess._set_returncode(self, wait_retval)
3764                 if self.returncode == os.EX_OK:
3765                         # If possible, update the mtime to match the remote package if
3766                         # the fetcher didn't already do it automatically.
3767                         bintree = self.pkg.root_config.trees["bintree"]
3768                         if bintree._remote_has_index:
3769                                 remote_mtime = bintree._remotepkgs[self.pkg.cpv].get("MTIME")
3770                                 if remote_mtime is not None:
3771                                         try:
3772                                                 remote_mtime = long(remote_mtime)
3773                                         except ValueError:
3774                                                 pass
3775                                         else:
3776                                                 try:
3777                                                         local_mtime = long(os.stat(self.pkg_path).st_mtime)
3778                                                 except OSError:
3779                                                         pass
3780                                                 else:
3781                                                         if remote_mtime != local_mtime:
3782                                                                 try:
3783                                                                         os.utime(self.pkg_path,
3784                                                                                 (remote_mtime, remote_mtime))
3785                                                                 except OSError:
3786                                                                         pass
3787
3788                 if self.locked:
3789                         self.unlock()
3790
3791         def lock(self):
3792                 """
3793                 This raises an AlreadyLocked exception if lock() is called
3794                 while a lock is already held. In order to avoid this, call
3795                 unlock() or check whether the "locked" attribute is True
3796                 or False before calling lock().
3797                 """
3798                 if self._lock_obj is not None:
3799                         raise self.AlreadyLocked((self._lock_obj,))
3800
3801                 self._lock_obj = portage.locks.lockfile(
3802                         self.pkg_path, wantnewlockfile=1)
3803                 self.locked = True
3804
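             # A minimal usage sketch of the locking protocol documented above
             # (hypothetical caller code, not part of this class's control flow;
             # _start() normally calls lock() itself when distlocks is enabled):
             #
             #   fetcher = BinpkgFetcher(pkg=pkg, pretend=False, scheduler=scheduler)
             #   if not fetcher.locked:
             #           fetcher.lock()
             #   try:
             #           pass  # fetch into fetcher.pkg_path
             #   finally:
             #           fetcher.unlock()
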
3805         class AlreadyLocked(portage.exception.PortageException):
3806                 pass
3807
3808         def unlock(self):
3809                 if self._lock_obj is None:
3810                         return
3811                 portage.locks.unlockfile(self._lock_obj)
3812                 self._lock_obj = None
3813                 self.locked = False
3814
3815 class BinpkgVerifier(AsynchronousTask):
3816         __slots__ = ("logfile", "pkg",)
3817
3818         def _start(self):
3819                 """
3820                 Note: Unlike a normal AsynchronousTask.start() method,
3821                 this one does all work synchronously. The returncode
3822                 attribute will be set before it returns.
3823                 """
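                     # Caller pattern implied by the note above -- a sketch, assuming the
                     # task is driven directly rather than through a scheduler:
                     #
                     #   verifier = BinpkgVerifier(background=False, logfile=None, pkg=pkg)
                     #   verifier.start()
                     #   if verifier.returncode != os.EX_OK:
                     #           pass  # digest check failed; the file was renamed aside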
3824
3825                 pkg = self.pkg
3826                 root_config = pkg.root_config
3827                 bintree = root_config.trees["bintree"]
3828                 rval = os.EX_OK
3829                 stdout_orig = sys.stdout
3830                 stderr_orig = sys.stderr
3831                 log_file = None
3832                 if self.background and self.logfile is not None:
3833                         log_file = open(self.logfile, 'a')
3834                 try:
3835                         if log_file is not None:
3836                                 sys.stdout = log_file
3837                                 sys.stderr = log_file
3838                         try:
3839                                 bintree.digestCheck(pkg)
3840                         except portage.exception.FileNotFound:
3841                                 writemsg("!!! Fetching Binary failed " + \
3842                                         "for '%s'\n" % pkg.cpv, noiselevel=-1)
3843                                 rval = 1
3844                         except portage.exception.DigestException, e:
3845                                 writemsg("\n!!! Digest verification failed:\n",
3846                                         noiselevel=-1)
3847                                 writemsg("!!! %s\n" % e.value[0],
3848                                         noiselevel=-1)
3849                                 writemsg("!!! Reason: %s\n" % e.value[1],
3850                                         noiselevel=-1)
3851                                 writemsg("!!! Got: %s\n" % e.value[2],
3852                                         noiselevel=-1)
3853                                 writemsg("!!! Expected: %s\n" % e.value[3],
3854                                         noiselevel=-1)
3855                                 rval = 1
3856                         if rval != os.EX_OK:
3857                                 pkg_path = bintree.getname(pkg.cpv)
3858                                 head, tail = os.path.split(pkg_path)
3859                                 temp_filename = portage._checksum_failure_temp_file(head, tail)
3860                                 writemsg("File renamed to '%s'\n" % (temp_filename,),
3861                                         noiselevel=-1)
3862                 finally:
3863                         sys.stdout = stdout_orig
3864                         sys.stderr = stderr_orig
3865                         if log_file is not None:
3866                                 log_file.close()
3867
3868                 self.returncode = rval
3869                 self.wait()
3870
3871 class BinpkgPrefetcher(CompositeTask):
3872
3873         __slots__ = ("pkg",) + \
3874                 ("pkg_path", "_bintree",)
3875
3876         def _start(self):
3877                 self._bintree = self.pkg.root_config.trees["bintree"]
3878                 fetcher = BinpkgFetcher(background=self.background,
3879                         logfile=self.scheduler.fetch.log_file, pkg=self.pkg,
3880                         scheduler=self.scheduler)
3881                 self.pkg_path = fetcher.pkg_path
3882                 self._start_task(fetcher, self._fetcher_exit)
3883
3884         def _fetcher_exit(self, fetcher):
3885
3886                 if self._default_exit(fetcher) != os.EX_OK:
3887                         self.wait()
3888                         return
3889
3890                 verifier = BinpkgVerifier(background=self.background,
3891                         logfile=self.scheduler.fetch.log_file, pkg=self.pkg)
3892                 self._start_task(verifier, self._verifier_exit)
3893
3894         def _verifier_exit(self, verifier):
3895                 if self._default_exit(verifier) != os.EX_OK:
3896                         self.wait()
3897                         return
3898
3899                 self._bintree.inject(self.pkg.cpv, filename=self.pkg_path)
3900
3901                 self._current_task = None
3902                 self.returncode = os.EX_OK
3903                 self.wait()
3904
3905 class BinpkgExtractorAsync(SpawnProcess):
3906
3907         __slots__ = ("image_dir", "pkg", "pkg_path")
3908
3909         _shell_binary = portage.const.BASH_BINARY
3910
3911         def _start(self):
3912                 self.args = [self._shell_binary, "-c",
3913                         "bzip2 -dqc -- %s | tar -xp -C %s -f -" % \
3914                         (portage._shell_quote(self.pkg_path),
3915                         portage._shell_quote(self.image_dir))]
3916
3917                 self.env = self.pkg.root_config.settings.environ()
3918                 SpawnProcess._start(self)
3919
3920 class MergeListItem(CompositeTask):
3921
3922         """
3923         TODO: For parallel scheduling, everything here needs asynchronous
3924         execution support (start, poll, and wait methods).
3925         """
3926
3927         __slots__ = ("args_set",
3928                 "binpkg_opts", "build_opts", "config_pool", "emerge_opts",
3929                 "find_blockers", "logger", "mtimedb", "pkg",
3930                 "pkg_count", "pkg_to_replace", "prefetcher",
3931                 "settings", "statusMessage", "world_atom") + \
3932                 ("_install_task",)
3933
3934         def _start(self):
3935
3936                 pkg = self.pkg
3937                 build_opts = self.build_opts
3938
3939                 if pkg.installed:
3940                         # uninstall, executed by self.merge()
3941                         self.returncode = os.EX_OK
3942                         self.wait()
3943                         return
3944
3945                 args_set = self.args_set
3946                 find_blockers = self.find_blockers
3947                 logger = self.logger
3948                 mtimedb = self.mtimedb
3949                 pkg_count = self.pkg_count
3950                 scheduler = self.scheduler
3951                 settings = self.settings
3952                 world_atom = self.world_atom
3953                 ldpath_mtimes = mtimedb["ldpath"]
3954
3955                 action_desc = "Emerging"
3956                 preposition = "for"
3957                 if pkg.type_name == "binary":
3958                         action_desc += " binary"
3959
3960                 if build_opts.fetchonly:
3961                         action_desc = "Fetching"
3962
3963                 msg = "%s (%s of %s) %s" % \
3964                         (action_desc,
3965                         colorize("MERGE_LIST_PROGRESS", str(pkg_count.curval)),
3966                         colorize("MERGE_LIST_PROGRESS", str(pkg_count.maxval)),
3967                         colorize("GOOD", pkg.cpv))
3968
3969                 portdb = pkg.root_config.trees["porttree"].dbapi
3970                 portdir_repo_name = portdb._repository_map.get(portdb.porttree_root)
3971                 if portdir_repo_name:
3972                         pkg_repo_name = pkg.metadata.get("repository")
3973                         if pkg_repo_name != portdir_repo_name:
3974                                 if not pkg_repo_name:
3975                                         pkg_repo_name = "unknown repo"
3976                                 msg += " from %s" % pkg_repo_name
3977
3978                 if pkg.root != "/":
3979                         msg += " %s %s" % (preposition, pkg.root)
3980
3981                 if not build_opts.pretend:
3982                         self.statusMessage(msg)
3983                         logger.log(" >>> emerge (%s of %s) %s to %s" % \
3984                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
3985
3986                 if pkg.type_name == "ebuild":
3987
3988                         build = EbuildBuild(args_set=args_set,
3989                                 background=self.background,
3990                                 config_pool=self.config_pool,
3991                                 find_blockers=find_blockers,
3992                                 ldpath_mtimes=ldpath_mtimes, logger=logger,
3993                                 opts=build_opts, pkg=pkg, pkg_count=pkg_count,
3994                                 prefetcher=self.prefetcher, scheduler=scheduler,
3995                                 settings=settings, world_atom=world_atom)
3996
3997                         self._install_task = build
3998                         self._start_task(build, self._default_final_exit)
3999                         return
4000
4001                 elif pkg.type_name == "binary":
4002
4003                         binpkg = Binpkg(background=self.background,
4004                                 find_blockers=find_blockers,
4005                                 ldpath_mtimes=ldpath_mtimes, logger=logger,
4006                                 opts=self.binpkg_opts, pkg=pkg, pkg_count=pkg_count,
4007                                 prefetcher=self.prefetcher, settings=settings,
4008                                 scheduler=scheduler, world_atom=world_atom)
4009
4010                         self._install_task = binpkg
4011                         self._start_task(binpkg, self._default_final_exit)
4012                         return
4013
4014         def _poll(self):
4015                 self._install_task.poll()
4016                 return self.returncode
4017
4018         def _wait(self):
4019                 self._install_task.wait()
4020                 return self.returncode
4021
4022         def merge(self):
4023
4024                 pkg = self.pkg
4025                 build_opts = self.build_opts
4026                 find_blockers = self.find_blockers
4027                 logger = self.logger
4028                 mtimedb = self.mtimedb
4029                 pkg_count = self.pkg_count
4030                 prefetcher = self.prefetcher
4031                 scheduler = self.scheduler
4032                 settings = self.settings
4033                 world_atom = self.world_atom
4034                 ldpath_mtimes = mtimedb["ldpath"]
4035
4036                 if pkg.installed:
4037                         if not (build_opts.buildpkgonly or \
4038                                 build_opts.fetchonly or build_opts.pretend):
4039
4040                                 uninstall = PackageUninstall(background=self.background,
4041                                         ldpath_mtimes=ldpath_mtimes, opts=self.emerge_opts,
4042                                         pkg=pkg, scheduler=scheduler, settings=settings)
4043
4044                                 uninstall.start()
4045                                 retval = uninstall.wait()
4046                                 if retval != os.EX_OK:
4047                                         return retval
4048                         return os.EX_OK
4049
4050                 if build_opts.fetchonly or \
4051                         build_opts.buildpkgonly:
4052                         return self.returncode
4053
4054                 retval = self._install_task.install()
4055                 return retval
4056
4057 class PackageMerge(AsynchronousTask):
4058         """
4059         TODO: Implement asynchronous merge so that the scheduler can
4060         run while a merge is executing.
4061         """
4062
4063         __slots__ = ("merge",)
4064
4065         def _start(self):
4066
4067                 pkg = self.merge.pkg
4068                 pkg_count = self.merge.pkg_count
4069
4070                 if pkg.installed:
4071                         action_desc = "Uninstalling"
4072                         preposition = "from"
4073                         counter_str = ""
4074                 else:
4075                         action_desc = "Installing"
4076                         preposition = "to"
4077                         counter_str = "(%s of %s) " % \
4078                                 (colorize("MERGE_LIST_PROGRESS", str(pkg_count.curval)),
4079                                 colorize("MERGE_LIST_PROGRESS", str(pkg_count.maxval)))
4080
4081                 msg = "%s %s%s" % \
4082                         (action_desc,
4083                         counter_str,
4084                         colorize("GOOD", pkg.cpv))
4085
4086                 if pkg.root != "/":
4087                         msg += " %s %s" % (preposition, pkg.root)
4088
4089                 if not self.merge.build_opts.fetchonly and \
4090                         not self.merge.build_opts.pretend and \
4091                         not self.merge.build_opts.buildpkgonly:
4092                         self.merge.statusMessage(msg)
4093
4094                 self.returncode = self.merge.merge()
4095                 self.wait()
4096
4097 class DependencyArg(object):
4098         def __init__(self, arg=None, root_config=None):
4099                 self.arg = arg
4100                 self.root_config = root_config
4101
4102         def __str__(self):
4103                 return str(self.arg)
4104
4105 class AtomArg(DependencyArg):
4106         def __init__(self, atom=None, **kwargs):
4107                 DependencyArg.__init__(self, **kwargs)
4108                 self.atom = atom
4109                 if not isinstance(self.atom, portage.dep.Atom):
4110                         self.atom = portage.dep.Atom(self.atom)
4111                 self.set = (self.atom, )
4112
4113 class PackageArg(DependencyArg):
4114         def __init__(self, package=None, **kwargs):
4115                 DependencyArg.__init__(self, **kwargs)
4116                 self.package = package
4117                 self.atom = portage.dep.Atom("=" + package.cpv)
4118                 self.set = (self.atom, )
4119
4120 class SetArg(DependencyArg):
4121         def __init__(self, set=None, **kwargs):
4122                 DependencyArg.__init__(self, **kwargs)
4123                 self.set = set
4124                 self.name = self.arg[len(SETPREFIX):]
4125
4126 class Dependency(SlotObject):
4127         __slots__ = ("atom", "blocker", "depth",
4128                 "parent", "onlydeps", "priority", "root")
4129         def __init__(self, **kwargs):
4130                 SlotObject.__init__(self, **kwargs)
4131                 if self.priority is None:
4132                         self.priority = DepPriority()
4133                 if self.depth is None:
4134                         self.depth = 0
4135
4136 class BlockerCache(portage.cache.mappings.MutableMapping):
4137         """This caches blockers of installed packages so that dep_check does not
4138         have to be done for every single installed package on every invocation of
4139         emerge.  The cache is invalidated whenever it is detected that something
4140         has changed that might alter the results of dep_check() calls:
4141                 1) the set of installed packages (including COUNTER) has changed
4142                 2) the old-style virtuals have changed
4143         """
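             # A condensed sketch of the cache lifecycle, mirroring how
             # BlockerDB.findInstalledBlockers() uses this class further below:
             #
             #   blocker_cache = BlockerCache(myroot, vardb)
             #   cached = blocker_cache.get(cpv)
             #   if cached is None or cached.counter != counter:
             #           blocker_cache[cpv] = blocker_cache.BlockerData(counter, atoms)
             #   blocker_cache.flush()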
4144
4145         # Number of uncached packages to trigger cache update, since
4146         # it's wasteful to update it for every vdb change.
4147         _cache_threshold = 5
4148
4149         class BlockerData(object):
4150
4151                 __slots__ = ("__weakref__", "atoms", "counter")
4152
4153                 def __init__(self, counter, atoms):
4154                         self.counter = counter
4155                         self.atoms = atoms
4156
4157         def __init__(self, myroot, vardb):
4158                 self._vardb = vardb
4159                 self._virtuals = vardb.settings.getvirtuals()
4160                 self._cache_filename = os.path.join(myroot,
4161                         portage.CACHE_PATH.lstrip(os.path.sep), "vdb_blockers.pickle")
4162                 self._cache_version = "1"
4163                 self._cache_data = None
4164                 self._modified = set()
4165                 self._load()
4166
4167         def _load(self):
4168                 try:
4169                         f = open(self._cache_filename, mode='rb')
4170                         mypickle = pickle.Unpickler(f)
4171                         try:
4172                                 mypickle.find_global = None
4173                         except AttributeError:
4174                                 # TODO: If py3k, override Unpickler.find_class().
4175                                 pass
4176                         self._cache_data = mypickle.load()
4177                         f.close()
4178                         del f
4179                 except (IOError, OSError, EOFError, ValueError, pickle.UnpicklingError), e:
4180                         if isinstance(e, pickle.UnpicklingError):
4181                                 writemsg("!!! Error loading '%s': %s\n" % \
4182                                         (self._cache_filename, str(e)), noiselevel=-1)
4183                         del e
4184
4185                 cache_valid = self._cache_data and \
4186                         isinstance(self._cache_data, dict) and \
4187                         self._cache_data.get("version") == self._cache_version and \
4188                         isinstance(self._cache_data.get("blockers"), dict)
4189                 if cache_valid:
4190                         # Validate all the atoms and counters so that
4191                         # corruption is detected as soon as possible.
4192                         invalid_items = set()
4193                         for k, v in self._cache_data["blockers"].iteritems():
4194                                 if not isinstance(k, basestring):
4195                                         invalid_items.add(k)
4196                                         continue
4197                                 try:
4198                                         if portage.catpkgsplit(k) is None:
4199                                                 invalid_items.add(k)
4200                                                 continue
4201                                 except portage.exception.InvalidData:
4202                                         invalid_items.add(k)
4203                                         continue
4204                                 if not isinstance(v, tuple) or \
4205                                         len(v) != 2:
4206                                         invalid_items.add(k)
4207                                         continue
4208                                 counter, atoms = v
4209                                 if not isinstance(counter, (int, long)):
4210                                         invalid_items.add(k)
4211                                         continue
4212                                 if not isinstance(atoms, (list, tuple)):
4213                                         invalid_items.add(k)
4214                                         continue
4215                                 invalid_atom = False
4216                                 for atom in atoms:
4217                                         if not isinstance(atom, basestring):
4218                                                 invalid_atom = True
4219                                                 break
4220                                         if atom[:1] != "!" or \
4221                                                 not portage.isvalidatom(
4222                                                 atom, allow_blockers=True):
4223                                                 invalid_atom = True
4224                                                 break
4225                                 if invalid_atom:
4226                                         invalid_items.add(k)
4227                                         continue
4228
4229                         for k in invalid_items:
4230                                 del self._cache_data["blockers"][k]
4231                         if not self._cache_data["blockers"]:
4232                                 cache_valid = False
4233
4234                 if not cache_valid:
4235                         self._cache_data = {"version":self._cache_version}
4236                         self._cache_data["blockers"] = {}
4237                         self._cache_data["virtuals"] = self._virtuals
4238                 self._modified.clear()
4239
4240         def flush(self):
4241                 """If the current user has permission and the internal blocker cache has
4242                 been updated, save it to disk and mark it unmodified.  This is called
4243                 by emerge after it has processed blockers for all installed packages.
4244                 Currently, the cache is only written if the user has superuser
4245                 privileges (since that's required to obtain a lock), but all users
4246                 have read access and benefit from faster blocker lookups (as long as
4247                 the entire cache is still valid).  The cache is stored as a pickled
4248                 dict object with the following format:
4249
4250                 {
4251                         version : "1",
4252                         "blockers" : {cpv1:(counter,(atom1, atom2...)), cpv2...},
4253                         "virtuals" : vardb.settings.getvirtuals()
4254                 }
4255                 """
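                     # For reference, the pickled file written below can be inspected with
                     # a short standalone snippet (a sketch; the path construction mirrors
                     # self._cache_filename in __init__):
                     #
                     #   import os, pickle, portage
                     #   path = os.path.join("/", portage.CACHE_PATH.lstrip(os.path.sep),
                     #           "vdb_blockers.pickle")
                     #   f = open(path, "rb")
                     #   data = pickle.load(f)
                     #   f.close()
                     #   print data["version"], len(data["blockers"])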
4256                 if len(self._modified) >= self._cache_threshold and \
4257                         secpass >= 2:
4258                         try:
4259                                 f = portage.util.atomic_ofstream(self._cache_filename, mode='wb')
4260                                 pickle.dump(self._cache_data, f, protocol=2)
4261                                 f.close()
4262                                 portage.util.apply_secpass_permissions(
4263                                         self._cache_filename, gid=portage.portage_gid, mode=0644)
4264                         except (IOError, OSError), e:
4265                                 pass
4266                         self._modified.clear()
4267
4268         def __setitem__(self, cpv, blocker_data):
4269                 """
4270                 Update the cache and mark it as modified for a future call to
4271                 self.flush().
4272
4273                 @param cpv: Package for which to cache blockers.
4274                 @type cpv: String
4275                 @param blocker_data: An object with counter and atoms attributes.
4276                 @type blocker_data: BlockerData
4277                 """
4278                 self._cache_data["blockers"][cpv] = \
4279                         (blocker_data.counter, tuple(str(x) for x in blocker_data.atoms))
4280                 self._modified.add(cpv)
4281
4282         def __iter__(self):
4283                 if self._cache_data is None:
4284                         # triggered by python-trace
4285                         return iter([])
4286                 return iter(self._cache_data["blockers"])
4287
4288         def __delitem__(self, cpv):
4289                 del self._cache_data["blockers"][cpv]
4290
4291         def __getitem__(self, cpv):
4292                 """
4293                 @rtype: BlockerData
4294                 @returns: An object with counter and atoms attributes.
4295                 """
4296                 return self.BlockerData(*self._cache_data["blockers"][cpv])
4297
4298 class BlockerDB(object):
4299
4300         def __init__(self, root_config):
4301                 self._root_config = root_config
4302                 self._vartree = root_config.trees["vartree"]
4303                 self._portdb = root_config.trees["porttree"].dbapi
4304
4305                 self._dep_check_trees = None
4306                 self._fake_vartree = None
4307
4308         def _get_fake_vartree(self, acquire_lock=0):
4309                 fake_vartree = self._fake_vartree
4310                 if fake_vartree is None:
4311                         fake_vartree = FakeVartree(self._root_config,
4312                                 acquire_lock=acquire_lock)
4313                         self._fake_vartree = fake_vartree
4314                         self._dep_check_trees = { self._vartree.root : {
4315                                 "porttree"    :  fake_vartree,
4316                                 "vartree"     :  fake_vartree,
4317                         }}
4318                 else:
4319                         fake_vartree.sync(acquire_lock=acquire_lock)
4320                 return fake_vartree
4321
4322         def findInstalledBlockers(self, new_pkg, acquire_lock=0):
4323                 blocker_cache = BlockerCache(self._vartree.root, self._vartree.dbapi)
4324                 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
4325                 settings = self._vartree.settings
4326                 stale_cache = set(blocker_cache)
4327                 fake_vartree = self._get_fake_vartree(acquire_lock=acquire_lock)
4328                 dep_check_trees = self._dep_check_trees
4329                 vardb = fake_vartree.dbapi
4330                 installed_pkgs = list(vardb)
4331
4332                 for inst_pkg in installed_pkgs:
4333                         stale_cache.discard(inst_pkg.cpv)
4334                         cached_blockers = blocker_cache.get(inst_pkg.cpv)
4335                         if cached_blockers is not None and \
4336                                 cached_blockers.counter != long(inst_pkg.metadata["COUNTER"]):
4337                                 cached_blockers = None
4338                         if cached_blockers is not None:
4339                                 blocker_atoms = cached_blockers.atoms
4340                         else:
4341                                 # Use aux_get() to trigger FakeVartree global
4342                                 # updates on *DEPEND when appropriate.
4343                                 depstr = " ".join(vardb.aux_get(inst_pkg.cpv, dep_keys))
4344                                 try:
4345                                         portage.dep._dep_check_strict = False
4346                                         success, atoms = portage.dep_check(depstr,
4347                                                 vardb, settings, myuse=inst_pkg.use.enabled,
4348                                                 trees=dep_check_trees, myroot=inst_pkg.root)
4349                                 finally:
4350                                         portage.dep._dep_check_strict = True
4351                                 if not success:
4352                                         pkg_location = os.path.join(inst_pkg.root,
4353                                                 portage.VDB_PATH, inst_pkg.category, inst_pkg.pf)
4354                                         portage.writemsg("!!! %s/*DEPEND: %s\n" % \
4355                                                 (pkg_location, atoms), noiselevel=-1)
4356                                         continue
4357
4358                                 blocker_atoms = [atom for atom in atoms \
4359                                         if atom.startswith("!")]
4360                                 blocker_atoms.sort()
4361                                 counter = long(inst_pkg.metadata["COUNTER"])
4362                                 blocker_cache[inst_pkg.cpv] = \
4363                                         blocker_cache.BlockerData(counter, blocker_atoms)
4364                 for cpv in stale_cache:
4365                         del blocker_cache[cpv]
4366                 blocker_cache.flush()
4367
4368                 blocker_parents = digraph()
4369                 blocker_atoms = []
4370                 for pkg in installed_pkgs:
4371                         for blocker_atom in blocker_cache[pkg.cpv].atoms:
4372                                 blocker_atom = blocker_atom.lstrip("!")
4373                                 blocker_atoms.append(blocker_atom)
4374                                 blocker_parents.add(blocker_atom, pkg)
4375
4376                 blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
4377                 blocking_pkgs = set()
4378                 for atom in blocker_atoms.iterAtomsForPackage(new_pkg):
4379                         blocking_pkgs.update(blocker_parents.parent_nodes(atom))
4380
4381                 # Check for blockers in the other direction.
4382                 depstr = " ".join(new_pkg.metadata[k] for k in dep_keys)
4383                 try:
4384                         portage.dep._dep_check_strict = False
4385                         success, atoms = portage.dep_check(depstr,
4386                                 vardb, settings, myuse=new_pkg.use.enabled,
4387                                 trees=dep_check_trees, myroot=new_pkg.root)
4388                 finally:
4389                         portage.dep._dep_check_strict = True
4390                 if not success:
4391                         # We should never get this far with invalid deps.
4392                         show_invalid_depstring_notice(new_pkg, depstr, atoms)
4393                         assert False
4394
4395                 blocker_atoms = [atom.lstrip("!") for atom in atoms \
4396                         if atom[:1] == "!"]
4397                 if blocker_atoms:
4398                         blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
4399                         for inst_pkg in installed_pkgs:
4400                                 try:
4401                                         blocker_atoms.iterAtomsForPackage(inst_pkg).next()
4402                                 except (portage.exception.InvalidDependString, StopIteration):
4403                                         continue
4404                                 blocking_pkgs.add(inst_pkg)
4405
4406                 return blocking_pkgs
4407
4408 def show_invalid_depstring_notice(parent_node, depstring, error_msg):
4409
4410         msg1 = "\n\n!!! Invalid or corrupt dependency specification: " + \
4411                 "\n\n%s\n\n%s\n\n%s\n\n" % (error_msg, parent_node, depstring)
4412         p_type, p_root, p_key, p_status = parent_node
4413         msg = []
4414         if p_status == "nomerge":
4415                 category, pf = portage.catsplit(p_key)
4416                 pkg_location = os.path.join(p_root, portage.VDB_PATH, category, pf)
4417                 msg.append("Portage is unable to process the dependencies of the ")
4418                 msg.append("'%s' package. " % p_key)
4419                 msg.append("In order to correct this problem, the package ")
4420                 msg.append("should be uninstalled, reinstalled, or upgraded. ")
4421                 msg.append("As a temporary workaround, the --nodeps option can ")
4422                 msg.append("be used to ignore all dependencies.  For reference, ")
4423                 msg.append("the problematic dependencies can be found in the ")
4424                 msg.append("*DEPEND files located in '%s/'." % pkg_location)
4425         else:
4426                 msg.append("This package can not be installed. ")
4427                 msg.append("Please notify the '%s' package maintainer " % p_key)
4428                 msg.append("about this problem.")
4429
4430         msg2 = "".join("%s\n" % line for line in textwrap.wrap("".join(msg), 72))
4431         writemsg_level(msg1 + msg2, level=logging.ERROR, noiselevel=-1)
4432
4433 class PackageVirtualDbapi(portage.dbapi):
4434         """
4435         A dbapi-like interface class that represents the state of the installed
4436         package database as new packages are installed, replacing any packages
4437         that previously existed in the same slot. The main difference between
4438         this class and fakedbapi is that this one uses Package instances
4439         internally (passed in via cpv_inject() and cpv_remove() calls).
4440         """
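             # A minimal sketch of the intended flow (names hypothetical), mirroring
             # the way depgraph populates its fake vdb further below:
             #
             #   fakedb = PackageVirtualDbapi(vartree.settings)
             #   for pkg in vardb:                # existing Package instances
             #           fakedb.cpv_inject(pkg)
             #   fakedb.cpv_inject(new_pkg)       # replaces any package in the same slot
             #   matches = fakedb.match_pkgs(portage.dep.Atom("dev-lang/python"))
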
4441         def __init__(self, settings):
4442                 portage.dbapi.__init__(self)
4443                 self.settings = settings
4444                 self._match_cache = {}
4445                 self._cp_map = {}
4446                 self._cpv_map = {}
4447
4448         def clear(self):
4449                 """
4450                 Remove all packages.
4451                 """
4452                 if self._cpv_map:
4453                         self._clear_cache()
4454                         self._cp_map.clear()
4455                         self._cpv_map.clear()
4456
4457         def copy(self):
4458                 obj = PackageVirtualDbapi(self.settings)
4459                 obj._match_cache = self._match_cache.copy()
4460                 obj._cp_map = self._cp_map.copy()
4461                 for k, v in obj._cp_map.iteritems():
4462                         obj._cp_map[k] = v[:]
4463                 obj._cpv_map = self._cpv_map.copy()
4464                 return obj
4465
4466         def __iter__(self):
4467                 return self._cpv_map.itervalues()
4468
4469         def __contains__(self, item):
4470                 existing = self._cpv_map.get(item.cpv)
4471                 if existing is not None and \
4472                         existing == item:
4473                         return True
4474                 return False
4475
4476         def get(self, item, default=None):
4477                 cpv = getattr(item, "cpv", None)
4478                 if cpv is None:
4479                         if len(item) != 4:
4480                                 return default
4481                         type_name, root, cpv, operation = item
4482
4483                 existing = self._cpv_map.get(cpv)
4484                 if existing is not None and \
4485                         existing == item:
4486                         return existing
4487                 return default
4488
4489         def match_pkgs(self, atom):
4490                 return [self._cpv_map[cpv] for cpv in self.match(atom)]
4491
4492         def _clear_cache(self):
4493                 if self._categories is not None:
4494                         self._categories = None
4495                 if self._match_cache:
4496                         self._match_cache = {}
4497
4498         def match(self, origdep, use_cache=1):
4499                 result = self._match_cache.get(origdep)
4500                 if result is not None:
4501                         return result[:]
4502                 result = portage.dbapi.match(self, origdep, use_cache=use_cache)
4503                 self._match_cache[origdep] = result
4504                 return result[:]
4505
4506         def cpv_exists(self, cpv):
4507                 return cpv in self._cpv_map
4508
4509         def cp_list(self, mycp, use_cache=1):
4510                 cachelist = self._match_cache.get(mycp)
4511                 # cp_list() doesn't expand old-style virtuals
4512                 if cachelist and cachelist[0].startswith(mycp):
4513                         return cachelist[:]
4514                 cpv_list = self._cp_map.get(mycp)
4515                 if cpv_list is None:
4516                         cpv_list = []
4517                 else:
4518                         cpv_list = [pkg.cpv for pkg in cpv_list]
4519                 self._cpv_sort_ascending(cpv_list)
4520                 if not (not cpv_list and mycp.startswith("virtual/")):
4521                         self._match_cache[mycp] = cpv_list
4522                 return cpv_list[:]
4523
4524         def cp_all(self):
4525                 return list(self._cp_map)
4526
4527         def cpv_all(self):
4528                 return list(self._cpv_map)
4529
4530         def cpv_inject(self, pkg):
4531                 cp_list = self._cp_map.get(pkg.cp)
4532                 if cp_list is None:
4533                         cp_list = []
4534                         self._cp_map[pkg.cp] = cp_list
4535                 e_pkg = self._cpv_map.get(pkg.cpv)
4536                 if e_pkg is not None:
4537                         if e_pkg == pkg:
4538                                 return
4539                         self.cpv_remove(e_pkg)
4540                 for e_pkg in cp_list:
4541                         if e_pkg.slot_atom == pkg.slot_atom:
4542                                 if e_pkg == pkg:
4543                                         return
4544                                 self.cpv_remove(e_pkg)
4545                                 break
4546                 cp_list.append(pkg)
4547                 self._cpv_map[pkg.cpv] = pkg
4548                 self._clear_cache()
4549
4550         def cpv_remove(self, pkg):
4551                 old_pkg = self._cpv_map.get(pkg.cpv)
4552                 if old_pkg != pkg:
4553                         raise KeyError(pkg)
4554                 self._cp_map[pkg.cp].remove(pkg)
4555                 del self._cpv_map[pkg.cpv]
4556                 self._clear_cache()
4557
4558         def aux_get(self, cpv, wants):
4559                 metadata = self._cpv_map[cpv].metadata
4560                 return [metadata.get(x, "") for x in wants]
4561
4562         def aux_update(self, cpv, values):
4563                 self._cpv_map[cpv].metadata.update(values)
4564                 self._clear_cache()
4565
4566 class depgraph(object):
4567
4568         pkg_tree_map = RootConfig.pkg_tree_map
4569
4570         _dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
4571
4572         def __init__(self, settings, trees, myopts, myparams, spinner):
4573                 self.settings = settings
4574                 self.target_root = settings["ROOT"]
4575                 self.myopts = myopts
4576                 self.myparams = myparams
4577                 self.edebug = 0
4578                 if settings.get("PORTAGE_DEBUG", "") == "1":
4579                         self.edebug = 1
4580                 self.spinner = spinner
4581                 self._running_root = trees["/"]["root_config"]
4582                 self._opts_no_restart = Scheduler._opts_no_restart
4583                 self.pkgsettings = {}
4584                 # Maps slot atom to package for each Package added to the graph.
4585                 self._slot_pkg_map = {}
4586                 # Maps nodes to the reasons they were selected for reinstallation.
4587                 self._reinstall_nodes = {}
4588                 self.mydbapi = {}
4589                 self.trees = {}
4590                 self._trees_orig = trees
4591                 self.roots = {}
4592                 # Contains a filtered view of preferred packages that are selected
4593                 # from available repositories.
4594                 self._filtered_trees = {}
4595                 # Contains installed packages and new packages that have been added
4596                 # to the graph.
4597                 self._graph_trees = {}
4598                 # All Package instances
4599                 self._pkg_cache = {}
4600                 for myroot in trees:
4601                         self.trees[myroot] = {}
4602                         # Create a RootConfig instance that references
4603                         # the FakeVartree instead of the real one.
4604                         self.roots[myroot] = RootConfig(
4605                                 trees[myroot]["vartree"].settings,
4606                                 self.trees[myroot],
4607                                 trees[myroot]["root_config"].setconfig)
4608                         for tree in ("porttree", "bintree"):
4609                                 self.trees[myroot][tree] = trees[myroot][tree]
4610                         self.trees[myroot]["vartree"] = \
4611                                 FakeVartree(trees[myroot]["root_config"],
4612                                         pkg_cache=self._pkg_cache)
4613                         self.pkgsettings[myroot] = portage.config(
4614                                 clone=self.trees[myroot]["vartree"].settings)
4615                         self._slot_pkg_map[myroot] = {}
4616                         vardb = self.trees[myroot]["vartree"].dbapi
4617                         preload_installed_pkgs = "--nodeps" not in self.myopts and \
4618                                 "--buildpkgonly" not in self.myopts
4619                         # This fakedbapi instance will model the state that the vdb will
4620                         # have after new packages have been installed.
4621                         fakedb = PackageVirtualDbapi(vardb.settings)
4622                         if preload_installed_pkgs:
4623                                 for pkg in vardb:
4624                                         self.spinner.update()
4625                                         # This triggers metadata updates via FakeVartree.
4626                                         vardb.aux_get(pkg.cpv, [])
4627                                         fakedb.cpv_inject(pkg)
4628
4629                         # Now that the vardb state is cached in our FakeVartree,
4630                         # we won't be needing the real vartree cache for a while.
4631                         # To make some room on the heap, clear the vardbapi
4632                         # caches.
4633                         trees[myroot]["vartree"].dbapi._clear_cache()
4634                         gc.collect()
4635
4636                         self.mydbapi[myroot] = fakedb
4637                         def graph_tree():
4638                                 pass
4639                         graph_tree.dbapi = fakedb
4640                         self._graph_trees[myroot] = {}
4641                         self._filtered_trees[myroot] = {}
4642                         # Substitute the graph tree for the vartree in dep_check() since we
4643                         # want atom selections to be consistent with package selections
4644                         # that have already been made.
4645                         self._graph_trees[myroot]["porttree"]   = graph_tree
4646                         self._graph_trees[myroot]["vartree"]    = graph_tree
4647                         def filtered_tree():
4648                                 pass
4649                         filtered_tree.dbapi = self._dep_check_composite_db(self, myroot)
4650                         self._filtered_trees[myroot]["porttree"] = filtered_tree
4651
4652                         # Passing in graph_tree as the vartree here could lead to better
4653                         # atom selections in some cases by causing atoms for packages that
4654                         # have been added to the graph to be preferred over other choices.
4655                         # However, it can trigger atom selections that result in
4656                         # unresolvable direct circular dependencies. For example, this
4657                         # happens with gwydion-dylan which depends on either itself or
4658                         # gwydion-dylan-bin. In case gwydion-dylan is not yet installed,
4659                         # gwydion-dylan-bin needs to be selected in order to avoid
4660                         # an unresolvable direct circular dependency.
4661                         #
4662                         # To solve the problem described above, pass in "graph_db" so that
4663                         # packages that have been added to the graph are distinguishable
4664                         # from other available packages and installed packages. Also, pass
4665                         # the parent package into self._select_atoms() calls so that
4666                         # unresolvable direct circular dependencies can be detected and
4667                         # avoided when possible.
4668                         self._filtered_trees[myroot]["graph_db"] = graph_tree.dbapi
4669                         self._filtered_trees[myroot]["vartree"] = self.trees[myroot]["vartree"]
4670
4671                         dbs = []
4672                         portdb = self.trees[myroot]["porttree"].dbapi
4673                         bindb  = self.trees[myroot]["bintree"].dbapi
4674                         vardb  = self.trees[myroot]["vartree"].dbapi
4675                         #               (db, pkg_type, built, installed, db_keys)
4676                         if "--usepkgonly" not in self.myopts:
4677                                 db_keys = list(portdb._aux_cache_keys)
4678                                 dbs.append((portdb, "ebuild", False, False, db_keys))
4679                         if "--usepkg" in self.myopts:
4680                                 db_keys = list(bindb._aux_cache_keys)
4681                                 dbs.append((bindb,  "binary", True, False, db_keys))
4682                         db_keys = list(trees[myroot]["vartree"].dbapi._aux_cache_keys)
4683                         dbs.append((vardb, "installed", True, True, db_keys))
4684                         self._filtered_trees[myroot]["dbs"] = dbs
4685                         if "--usepkg" in self.myopts:
4686                                 self.trees[myroot]["bintree"].populate(
4687                                         "--getbinpkg" in self.myopts,
4688                                         "--getbinpkgonly" in self.myopts)
4689                 del trees
4690
4691                 self.digraph=portage.digraph()
4692                 # contains all sets added to the graph
4693                 self._sets = {}
4694                 # contains atoms given as arguments
4695                 self._sets["args"] = InternalPackageSet()
4696                 # contains all atoms from all sets added to the graph, including
4697                 # atoms given as arguments
4698                 self._set_atoms = InternalPackageSet()
4699                 self._atom_arg_map = {}
4700                 # contains all nodes pulled in by self._set_atoms
4701                 self._set_nodes = set()
4702                 # Contains only Blocker -> Uninstall edges
4703                 self._blocker_uninstalls = digraph()
4704                 # Contains only Package -> Blocker edges
4705                 self._blocker_parents = digraph()
4706                 # Contains only irrelevant Package -> Blocker edges
4707                 self._irrelevant_blockers = digraph()
4708                 # Contains only unsolvable Package -> Blocker edges
4709                 self._unsolvable_blockers = digraph()
4710                 # Contains all Blocker -> Blocked Package edges
4711                 self._blocked_pkgs = digraph()
4712                 # Contains world packages that have been protected from
4713                 # uninstallation but may not have been added to the graph
4714                 # if the graph is not complete yet.
4715                 self._blocked_world_pkgs = {}
4716                 self._slot_collision_info = {}
4717                 # Slot collision nodes are not allowed to block other packages since
4718                 # blocker validation is only able to account for one package per slot.
4719                 self._slot_collision_nodes = set()
4720                 self._parent_atoms = {}
4721                 self._slot_conflict_parent_atoms = set()
4722                 self._serialized_tasks_cache = None
4723                 self._scheduler_graph = None
4724                 self._displayed_list = None
4725                 self._pprovided_args = []
4726                 self._missing_args = []
4727                 self._masked_installed = set()
4728                 self._unsatisfied_deps_for_display = []
4729                 self._unsatisfied_blockers_for_display = None
4730                 self._circular_deps_for_display = None
4731                 self._dep_stack = []
4732                 self._unsatisfied_deps = []
4733                 self._initially_unsatisfied_deps = []
4734                 self._ignored_deps = []
4735                 self._required_set_names = set(["system", "world"])
4736                 self._select_atoms = self._select_atoms_highest_available
4737                 self._select_package = self._select_pkg_highest_available
4738                 self._highest_pkg_cache = {}
4739
4740         def _show_slot_collision_notice(self):
4741                 """Show an informational message advising the user to mask one of
4742                 the packages. In some cases it may be possible to resolve this
4743                 automatically, but support for backtracking (removal of nodes that have
4744                 already been selected) will be required in order to handle all possible
4745                 cases.
4746                 """
4747
4748                 if not self._slot_collision_info:
4749                         return
4750
4751                 self._show_merge_list()
4752
4753                 msg = []
4754                 msg.append("\n!!! Multiple package instances within a single " + \
4755                         "package slot have been pulled\n")
4756                 msg.append("!!! into the dependency graph, resulting" + \
4757                         " in a slot conflict:\n\n")
4758                 indent = "  "
4759                 # Max number of parents shown, to avoid flooding the display.
4760                 max_parents = 3
4761                 explanation_columns = 70
4762                 explanations = 0
4763                 for (slot_atom, root), slot_nodes \
4764                         in self._slot_collision_info.iteritems():
4765                         msg.append(str(slot_atom))
4766                         msg.append("\n\n")
4767
4768                         for node in slot_nodes:
4769                                 msg.append(indent)
4770                                 msg.append(str(node))
4771                                 parent_atoms = self._parent_atoms.get(node)
4772                                 if parent_atoms:
4773                                         pruned_list = set()
4774                                         # Prefer conflict atoms over others.
4775                                         for parent_atom in parent_atoms:
4776                                                 if len(pruned_list) >= max_parents:
4777                                                         break
4778                                                 if parent_atom in self._slot_conflict_parent_atoms:
4779                                                         pruned_list.add(parent_atom)
4780
4781                                         # If this package was pulled in by conflict atoms then
4782                                         # show those alone since those are the most interesting.
4783                                         if not pruned_list:
4784                                                 # When generating the pruned list, prefer instances
4785                                                 # of DependencyArg over instances of Package.
4786                                                 for parent_atom in parent_atoms:
4787                                                         if len(pruned_list) >= max_parents:
4788                                                                 break
4789                                                         parent, atom = parent_atom
4790                                                         if isinstance(parent, DependencyArg):
4791                                                                 pruned_list.add(parent_atom)
4792                                                 # Prefer Package instances that themselves have been
4793                                                 # pulled into collision slots.
4794                                                 for parent_atom in parent_atoms:
4795                                                         if len(pruned_list) >= max_parents:
4796                                                                 break
4797                                                         parent, atom = parent_atom
4798                                                         if isinstance(parent, Package) and \
4799                                                                 (parent.slot_atom, parent.root) \
4800                                                                 in self._slot_collision_info:
4801                                                                 pruned_list.add(parent_atom)
4802                                                 for parent_atom in parent_atoms:
4803                                                         if len(pruned_list) >= max_parents:
4804                                                                 break
4805                                                         pruned_list.add(parent_atom)
4806                                         omitted_parents = len(parent_atoms) - len(pruned_list)
4807                                         parent_atoms = pruned_list
4808                                         msg.append(" pulled in by\n")
4809                                         for parent_atom in parent_atoms:
4810                                                 parent, atom = parent_atom
4811                                                 msg.append(2*indent)
4812                                                 if isinstance(parent,
4813                                                         (PackageArg, AtomArg)):
4814                                                         # For PackageArg and AtomArg types, it's
4815                                                         # redundant to display the atom attribute.
4816                                                         msg.append(str(parent))
4817                                                 else:
4818                                                         # Display the specific atom from SetArg or
4819                                                         # Package types.
4820                                                         msg.append("%s required by %s" % (atom, parent))
4821                                                 msg.append("\n")
4822                                         if omitted_parents:
4823                                                 msg.append(2*indent)
4824                                                 msg.append("(and %d more)\n" % omitted_parents)
4825                                 else:
4826                                         msg.append(" (no parents)\n")
4827                                 msg.append("\n")
4828                         explanation = self._slot_conflict_explanation(slot_nodes)
4829                         if explanation:
4830                                 explanations += 1
4831                                 msg.append(indent + "Explanation:\n\n")
4832                                 for line in textwrap.wrap(explanation, explanation_columns):
4833                                         msg.append(2*indent + line + "\n")
4834                                 msg.append("\n")
4835                 msg.append("\n")
4836                 sys.stderr.write("".join(msg))
4837                 sys.stderr.flush()
4838
4839                 explanations_for_all = explanations == len(self._slot_collision_info)
4840
4841                 if explanations_for_all or "--quiet" in self.myopts:
4842                         return
4843
4844                 msg = []
4845                 msg.append("It may be possible to solve this problem ")
4846                 msg.append("by using package.mask to prevent one of ")
4847                 msg.append("those packages from being selected. ")
4848                 msg.append("However, it is also possible that conflicting ")
4849                 msg.append("dependencies exist such that they are impossible to ")
4850                 msg.append("satisfy simultaneously.  If such a conflict exists in ")
4851                 msg.append("the dependencies of two different packages, then those ")
4852                 msg.append("packages cannot be installed simultaneously.")
4853
4854                 from formatter import AbstractFormatter, DumbWriter
4855                 f = AbstractFormatter(DumbWriter(sys.stderr, maxcol=72))
4856                 for x in msg:
4857                         f.add_flowing_data(x)
4858                 f.end_paragraph(1)
4859
4860                 msg = []
4861                 msg.append("For more information, see MASKED PACKAGES ")
4862                 msg.append("section in the emerge man page or refer ")
4863                 msg.append("to the Gentoo Handbook.")
4864                 for x in msg:
4865                         f.add_flowing_data(x)
4866                 f.end_paragraph(1)
4867                 f.writer.flush()
4868
4869         def _slot_conflict_explanation(self, slot_nodes):
4870                 """
4871                 When a slot conflict occurs due to USE deps, there are a few
4872                 different cases to consider:
4873
4874                 1) New USE are correctly set but --newuse wasn't requested so an
4875                    installed package with incorrect USE happened to get pulled
4876                    into graph before the new one.
4877                    into the graph before the new one.
4878                 2) New USE are incorrectly set but an installed package has correct
4879                    USE so it got pulled into the graph, and a new instance also got
4880                    pulled in due to --newuse or an upgrade.
4881
4882                 3) Multiple USE deps exist that can't be satisfied simultaneously,
4883                    and multiple package instances got pulled into the same slot to
4884                    satisfy the conflicting deps.
4885
4886                 Currently, explanations and suggested courses of action are generated
4887                 for cases 1 and 2. Case 3 is too complex to give a useful suggestion.
4888                 """
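                     # A hedged illustration of case 1, using hypothetical package names:
                     # the installed dev-libs/foo-1.0 with USE="-ssl" is pulled into the
                     # graph first because --newuse was not requested, and a parent atom
                     # such as dev-libs/foo[ssl] then pulls a second dev-libs/foo-1.0
                     # instance into the same slot, yielding the two-node conflict that
                     # the suggestions below try to explain.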
4889
4890                 if len(slot_nodes) != 2:
4891                         # Suggestions are only implemented for
4892                         # conflicts between two packages.
4893                         return None
4894
4895                 all_conflict_atoms = self._slot_conflict_parent_atoms
4896                 matched_node = None
4897                 matched_atoms = None
4898                 unmatched_node = None
4899                 for node in slot_nodes:
4900                         parent_atoms = self._parent_atoms.get(node)
4901                         if not parent_atoms:
4902                                 # Normally, there are always parent atoms. If there are
4903                                 # none then something unexpected is happening and there's
4904                                 # currently no suggestion for this case.
4905                                 return None
4906                         conflict_atoms = all_conflict_atoms.intersection(parent_atoms)
4907                         for parent_atom in conflict_atoms:
4908                                 parent, atom = parent_atom
4909                                 if not atom.use:
4910                                         # Suggestions are currently only implemented for cases
4911                                         # in which all conflict atoms have USE deps.
4912                                         return None
4913                         if conflict_atoms:
4914                                 if matched_node is not None:
4915                                         # If conflict atoms match multiple nodes
4916                                         # then there's no suggestion.
4917                                         return None
4918                                 matched_node = node
4919                                 matched_atoms = conflict_atoms
4920                         else:
4921                                 if unmatched_node is not None:
4922                                         # Neither node is matched by conflict atoms, and
4923                                         # there is no suggestion for this case.
4924                                         return None
4925                                 unmatched_node = node
4926
4927                 if matched_node is None or unmatched_node is None:
4928                         # This shouldn't happen.
4929                         return None
4930
4931                 if unmatched_node.installed and not matched_node.installed and \
4932                         unmatched_node.cpv == matched_node.cpv:
4933                         # If the conflicting packages are the same version then
4934                         # --newuse should be all that's needed. If they are different
4935                         # versions then there's some other problem.
4936                         return "New USE are correctly set, but --newuse wasn't" + \
4937                                 " requested, so an installed package with incorrect USE " + \
4938                                 "happened to get pulled into the dependency graph. " + \
4939                                 "In order to solve " + \
4940                                 "this, either specify the --newuse option or explicitly " + \
4941                                 "reinstall '%s'." % matched_node.slot_atom
4942
4943                 if matched_node.installed and not unmatched_node.installed:
4944                         atoms = sorted(set(atom for parent, atom in matched_atoms))
4945                         explanation = ("New USE for '%s' are incorrectly set. " + \
4946                                 "In order to solve this, adjust USE to satisfy '%s'") % \
4947                                 (matched_node.slot_atom, atoms[0])
4948                         if len(atoms) > 1:
4949                                 for atom in atoms[1:-1]:
4950                                         explanation += ", '%s'" % (atom,)
4951                                 if len(atoms) > 2:
4952                                         explanation += ","
4953                                 explanation += " and '%s'" % (atoms[-1],)
4954                         explanation += "."
4955                         return explanation
4956
4957                 return None
4958
4959         def _process_slot_conflicts(self):
4960                 """
4961                 Process slot conflict data to identify specific atoms which
4962                 lead to conflict. These atoms only match a subset of the
4963                 packages that have been pulled into a given slot.
4964                 """
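                     # Illustrative sketch (hypothetical atoms): if dev-libs/foo-1.0 and
                     # dev-libs/foo-2.0 occupy the same slot, a parent atom such as
                     # >=dev-libs/foo-2.0 matches only the newer instance; when it fails
                     # to match the older one below, it is recorded in
                     # self._slot_conflict_parent_atoms as a conflict-causing atom.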
4965                 for (slot_atom, root), slot_nodes \
4966                         in self._slot_collision_info.iteritems():
4967
4968                         all_parent_atoms = set()
4969                         for pkg in slot_nodes:
4970                                 parent_atoms = self._parent_atoms.get(pkg)
4971                                 if not parent_atoms:
4972                                         continue
4973                                 all_parent_atoms.update(parent_atoms)
4974
4975                         for pkg in slot_nodes:
4976                                 parent_atoms = self._parent_atoms.get(pkg)
4977                                 if parent_atoms is None:
4978                                         parent_atoms = set()
4979                                         self._parent_atoms[pkg] = parent_atoms
4980                                 for parent_atom in all_parent_atoms:
4981                                         if parent_atom in parent_atoms:
4982                                                 continue
4983                                         # Use package set for matching since it will match via
4984                                         # PROVIDE when necessary, while match_from_list does not.
4985                                         parent, atom = parent_atom
4986                                         atom_set = InternalPackageSet(
4987                                                 initial_atoms=(atom,))
4988                                         if atom_set.findAtomForPackage(pkg):
4989                                                 parent_atoms.add(parent_atom)
4990                                         else:
4991                                                 self._slot_conflict_parent_atoms.add(parent_atom)
4992
4993         def _reinstall_for_flags(self, forced_flags,
4994                 orig_use, orig_iuse, cur_use, cur_iuse):
4995                 """Return a set of flags that trigger reinstallation, or None if there
4996                 are no such flags."""
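                     # Hedged worked example (hypothetical flag sets, forced_flags empty)
                     # for the --newuse case:
                     #   orig_iuse = set(["ssl", "gtk"])   cur_iuse = set(["ssl", "qt4"])
                     #   orig_use  = set(["ssl"])          cur_use  = set(["ssl", "qt4"])
                     # The IUSE symmetric difference is set(["gtk", "qt4"]), the enabled
                     # flag symmetric difference is set(["qt4"]), and their union
                     # set(["gtk", "qt4"]) is returned, triggering a reinstall.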
4997                 if "--newuse" in self.myopts:
4998                         flags = set(orig_iuse.symmetric_difference(
4999                                 cur_iuse).difference(forced_flags))
5000                         flags.update(orig_iuse.intersection(orig_use).symmetric_difference(
5001                                 cur_iuse.intersection(cur_use)))
5002                         if flags:
5003                                 return flags
5004                 elif "changed-use" == self.myopts.get("--reinstall"):
5005                         flags = orig_iuse.intersection(orig_use).symmetric_difference(
5006                                 cur_iuse.intersection(cur_use))
5007                         if flags:
5008                                 return flags
5009                 return None
5010
5011         def _create_graph(self, allow_unsatisfied=False):
5012                 dep_stack = self._dep_stack
5013                 while dep_stack:
5014                         self.spinner.update()
5015                         dep = dep_stack.pop()
5016                         if isinstance(dep, Package):
5017                                 if not self._add_pkg_deps(dep,
5018                                         allow_unsatisfied=allow_unsatisfied):
5019                                         return 0
5020                                 continue
5021                         if not self._add_dep(dep, allow_unsatisfied=allow_unsatisfied):
5022                                 return 0
5023                 return 1
5024
5025         def _add_dep(self, dep, allow_unsatisfied=False):
5026                 debug = "--debug" in self.myopts
5027                 buildpkgonly = "--buildpkgonly" in self.myopts
5028                 nodeps = "--nodeps" in self.myopts
5029                 empty = "empty" in self.myparams
5030                 deep = "deep" in self.myparams
5031                 update = "--update" in self.myopts and dep.depth <= 1
5032                 if dep.blocker:
5033                         if not buildpkgonly and \
5034                                 not nodeps and \
5035                                 dep.parent not in self._slot_collision_nodes:
5036                                 if dep.parent.onlydeps:
5037                                         # It's safe to ignore blockers if the
5038                                         # parent is an --onlydeps node.
5039                                         return 1
5040                                 # The blocker applies to the root where
5041                                 # the parent is or will be installed.
5042                                 blocker = Blocker(atom=dep.atom,
5043                                         eapi=dep.parent.metadata["EAPI"],
5044                                         root=dep.parent.root)
5045                                 self._blocker_parents.add(blocker, dep.parent)
5046                         return 1
5047                 dep_pkg, existing_node = self._select_package(dep.root, dep.atom,
5048                         onlydeps=dep.onlydeps)
5049                 if not dep_pkg:
5050                         if dep.priority.optional:
5051                                 # This could be an unnecessary build-time dep
5052                                 # pulled in by --with-bdeps=y.
5053                                 return 1
5054                         if allow_unsatisfied:
5055                                 self._unsatisfied_deps.append(dep)
5056                                 return 1
5057                         self._unsatisfied_deps_for_display.append(
5058                                 ((dep.root, dep.atom), {"myparent":dep.parent}))
5059                         return 0
5060                 # In some cases, dep_check will return deps that shouldn't
5061                 # be processed any further, so they are identified and
5062                 # discarded here. Try to discard as few as possible since
5063                 # discarded dependencies reduce the amount of information
5064                 # available for optimization of merge order.
5065                 if dep.priority.satisfied and \
5066                         not dep_pkg.installed and \
5067                         not (existing_node or empty or deep or update):
5068                         myarg = None
5069                         if dep.root == self.target_root:
5070                                 try:
5071                                         myarg = self._iter_atoms_for_pkg(dep_pkg).next()
5072                                 except StopIteration:
5073                                         pass
5074                                 except portage.exception.InvalidDependString:
5075                                         if not dep_pkg.installed:
5076                                                 # This shouldn't happen since the package
5077                                                 # should have been masked.
5078                                                 raise
5079                         if not myarg:
5080                                 self._ignored_deps.append(dep)
5081                                 return 1
5082
5083                 if not self._add_pkg(dep_pkg, dep):
5084                         return 0
5085                 return 1
5086
5087         def _add_pkg(self, pkg, dep):
5088                 myparent = None
5089                 priority = None
5090                 depth = 0
5091                 if dep is None:
5092                         dep = Dependency()
5093                 else:
5094                         myparent = dep.parent
5095                         priority = dep.priority
5096                         depth = dep.depth
5097                 if priority is None:
5098                         priority = DepPriority()
5099                 """
5100                 Fills the digraph with nodes comprised of packages to merge.
5101                 mybigkey is the package spec of the package to merge.
5102                 myparent is the package depending on mybigkey ( or None )
5103                 addme = Should we add this package to the digraph or are we just looking at its deps?
5104                         Think --onlydeps, we need to ignore packages in that case.
5105                 #stuff to add:
5106                 #SLOT-aware emerge
5107                 #IUSE-aware emerge -> USE DEP aware depgraph
5108                 #"no downgrade" emerge
5109                 """
5110                 # Ensure that the dependencies of the same package
5111                 # are never processed more than once.
5112                 previously_added = pkg in self.digraph
5113
5114                 # select the correct /var database that we'll be checking against
5115                 vardbapi = self.trees[pkg.root]["vartree"].dbapi
5116                 pkgsettings = self.pkgsettings[pkg.root]
5117
5118                 arg_atoms = None
5119                 if True:
5120                         try:
5121                                 arg_atoms = list(self._iter_atoms_for_pkg(pkg))
5122                         except portage.exception.InvalidDependString, e:
5123                                 if not pkg.installed:
5124                                         show_invalid_depstring_notice(
5125                                                 pkg, pkg.metadata["PROVIDE"], str(e))
5126                                         return 0
5127                                 del e
5128
5129                 if not pkg.onlydeps:
5130                         if not pkg.installed and \
5131                                 "empty" not in self.myparams and \
5132                                 vardbapi.match(pkg.slot_atom):
5133                                 # Increase the priority of dependencies on packages that
5134                                 # are being rebuilt. This optimizes merge order so that
5135                                 # dependencies are rebuilt/updated as soon as possible,
5136                                 # which is needed especially when emerge is called by
5137                                 # revdep-rebuild since dependencies may be affected by ABI
5138                                 # breakage that has rendered them useless. Don't adjust
5139                                 # priority here when in "empty" mode since all packages
5140                                 # are being merged in that case.
5141                                 priority.rebuild = True
5142
5143                         existing_node = self._slot_pkg_map[pkg.root].get(pkg.slot_atom)
5144                         slot_collision = False
5145                         if existing_node:
5146                                 existing_node_matches = pkg.cpv == existing_node.cpv
5147                                 if existing_node_matches and \
5148                                         pkg != existing_node and \
5149                                         dep.atom is not None:
5150                                         # Use package set for matching since it will match via
5151                                         # PROVIDE when necessary, while match_from_list does not.
5152                                         atom_set = InternalPackageSet(initial_atoms=[dep.atom])
5153                                         if not atom_set.findAtomForPackage(existing_node):
5154                                                 existing_node_matches = False
5155                                 if existing_node_matches:
5156                                         # The existing node can be reused.
5157                                         if arg_atoms:
5158                                                 for parent_atom in arg_atoms:
5159                                                         parent, atom = parent_atom
5160                                                         self.digraph.add(existing_node, parent,
5161                                                                 priority=priority)
5162                                                         self._add_parent_atom(existing_node, parent_atom)
5163                                         # If a direct circular dependency is not an unsatisfied
5164                                         # buildtime dependency then drop it here since otherwise
5165                                         # it can skew the merge order calculation in an unwanted
5166                                         # way.
5167                                         if existing_node != myparent or \
5168                                                 (priority.buildtime and not priority.satisfied):
5169                                                 self.digraph.addnode(existing_node, myparent,
5170                                                         priority=priority)
5171                                                 if dep.atom is not None and dep.parent is not None:
5172                                                         self._add_parent_atom(existing_node,
5173                                                                 (dep.parent, dep.atom))
5174                                         return 1
5175                                 else:
5176
5177                                         # A slot collision has occurred.  Sometimes this coincides
5178                                         # with unresolvable blockers, so the slot collision will be
5179                                         # shown later if there are no unresolvable blockers.
5180                                         self._add_slot_conflict(pkg)
5181                                         slot_collision = True
5182
5183                         if slot_collision:
5184                                 # Now add this node to the graph so that self.display()
5185                                 # can show use flags and --tree output.  This node is
5186                                 # only being partially added to the graph.  It must not be
5187                                 # allowed to interfere with the other nodes that have been
5188                                 # added.  Do not overwrite data for existing nodes in
5189                                 # self.mydbapi since that data will be used for blocker
5190                                 # validation.
5191                                 # Even though the graph is now invalid, continue to process
5192                                 # dependencies so that things like --fetchonly can still
5193                                 # function despite collisions.
5194                                 pass
5195                         elif not previously_added:
5196                                 self._slot_pkg_map[pkg.root][pkg.slot_atom] = pkg
5197                                 self.mydbapi[pkg.root].cpv_inject(pkg)
5198                                 self._filtered_trees[pkg.root]["porttree"].dbapi._clear_cache()
5199
5200                         if not pkg.installed:
5201                                 # Allow this package to satisfy old-style virtuals in case it
5202                                 # doesn't already. Any pre-existing providers will be preferred
5203                                 # over this one.
5204                                 try:
5205                                         pkgsettings.setinst(pkg.cpv, pkg.metadata)
5206                                         # For consistency, also update the global virtuals.
5207                                         settings = self.roots[pkg.root].settings
5208                                         settings.unlock()
5209                                         settings.setinst(pkg.cpv, pkg.metadata)
5210                                         settings.lock()
5211                                 except portage.exception.InvalidDependString, e:
5212                                         show_invalid_depstring_notice(
5213                                                 pkg, pkg.metadata["PROVIDE"], str(e))
5214                                         del e
5215                                         return 0
5216
5217                 if arg_atoms:
5218                         self._set_nodes.add(pkg)
5219
5220                 # Do this even when the package is an --onlydeps node so that the
5221                 # parent/child relationship is always known in case
5222                 # self._show_slot_collision_notice() needs to be called later.
5223                 self.digraph.add(pkg, myparent, priority=priority)
5224                 if dep.atom is not None and dep.parent is not None:
5225                         self._add_parent_atom(pkg, (dep.parent, dep.atom))
5226
5227                 if arg_atoms:
5228                         for parent_atom in arg_atoms:
5229                                 parent, atom = parent_atom
5230                                 self.digraph.add(pkg, parent, priority=priority)
5231                                 self._add_parent_atom(pkg, parent_atom)
5232
5233                 """ This section determines whether we go deeper into dependencies or not.
5234                     We want to go deeper on a few occasions:
5235                     Installing package A, we need to make sure package A's deps are met.
5236                     emerge --deep <pkgspec>; we need to recursively check dependencies of pkgspec
5237                     If we are in --nodeps (no recursion) mode, we obviously only check 1 level of dependencies.
5238                 """
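                     # For instance (illustrative): with --nodeps, "recurse" is absent from
                     # self.myparams, so the package is never pushed and its deps are not
                     # followed; an already-installed package without --deep is appended
                     # to self._ignored_deps instead of the active dep stack.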
5239                 dep_stack = self._dep_stack
5240                 if "recurse" not in self.myparams:
5241                         return 1
5242                 elif pkg.installed and \
5243                         "deep" not in self.myparams:
5244                         dep_stack = self._ignored_deps
5245
5246                 self.spinner.update()
5247
5248                 if arg_atoms:
5249                         depth = 0
5250                 pkg.depth = depth
5251                 if not previously_added:
5252                         dep_stack.append(pkg)
5253                 return 1
5254
5255         def _add_parent_atom(self, pkg, parent_atom):
5256                 parent_atoms = self._parent_atoms.get(pkg)
5257                 if parent_atoms is None:
5258                         parent_atoms = set()
5259                         self._parent_atoms[pkg] = parent_atoms
5260                 parent_atoms.add(parent_atom)
5261
5262         def _add_slot_conflict(self, pkg):
5263                 self._slot_collision_nodes.add(pkg)
5264                 slot_key = (pkg.slot_atom, pkg.root)
5265                 slot_nodes = self._slot_collision_info.get(slot_key)
5266                 if slot_nodes is None:
5267                         slot_nodes = set()
5268                         slot_nodes.add(self._slot_pkg_map[pkg.root][pkg.slot_atom])
5269                         self._slot_collision_info[slot_key] = slot_nodes
5270                 slot_nodes.add(pkg)
5271
5272         def _add_pkg_deps(self, pkg, allow_unsatisfied=False):
5273
5274                 mytype = pkg.type_name
5275                 myroot = pkg.root
5276                 mykey = pkg.cpv
5277                 metadata = pkg.metadata
5278                 myuse = pkg.use.enabled
5279                 jbigkey = pkg
5280                 depth = pkg.depth + 1
5281                 removal_action = "remove" in self.myparams
5282
5283                 edepend={}
5284                 depkeys = ["DEPEND","RDEPEND","PDEPEND"]
5285                 for k in depkeys:
5286                         edepend[k] = metadata[k]
5287
5288                 if not pkg.built and \
5289                         "--buildpkgonly" in self.myopts and \
5290                         "deep" not in self.myparams and \
5291                         "empty" not in self.myparams:
5292                         edepend["RDEPEND"] = ""
5293                         edepend["PDEPEND"] = ""
5294                 bdeps_optional = False
5295
5296                 if pkg.built and not removal_action:
5297                         if self.myopts.get("--with-bdeps", "n") == "y":
5298                                 # Pull in build time deps as requested, but mark them as
5299                                 # "optional" since they are not strictly required. This allows
5300                                 # more freedom in the merge order calculation for solving
5301                                 # circular dependencies. Don't convert to PDEPEND since that
5302                                 # could make --with-bdeps=y less effective if it is used to
5303                                 # adjust merge order to prevent built_with_use() calls from
5304                                 # failing.
5305                                 bdeps_optional = True
5306                         else:
5307                                 # Built packages do not have build time dependencies.
5308                                 edepend["DEPEND"] = ""
5309
5310                 if removal_action and self.myopts.get("--with-bdeps", "y") == "n":
5311                         edepend["DEPEND"] = ""
5312
5313                 bdeps_root = "/"
5314                 root_deps = self.myopts.get("--root-deps")
5315                 if root_deps is not None:
5316                         if root_deps is True:
5317                                 bdeps_root = myroot
5318                         elif root_deps == "rdeps":
5319                                 edepend["DEPEND"] = ""
5320
5321                 deps = (
5322                         (bdeps_root, edepend["DEPEND"],
5323                                 self._priority(buildtime=(not bdeps_optional),
5324                                 optional=bdeps_optional)),
5325                         (myroot, edepend["RDEPEND"], self._priority(runtime=True)),
5326                         (myroot, edepend["PDEPEND"], self._priority(runtime_post=True))
5327                 )
5328
5329                 debug = "--debug" in self.myopts
5330                 strict = mytype != "installed"
5331                 try:
5332                         for dep_root, dep_string, dep_priority in deps:
5333                                 if not dep_string:
5334                                         continue
5335                                 if debug:
5336                                         print
5337                                         print "Parent:   ", jbigkey
5338                                         print "Depstring:", dep_string
5339                                         print "Priority:", dep_priority
5340                                 vardb = self.roots[dep_root].trees["vartree"].dbapi
5341                                 try:
5342                                         selected_atoms = self._select_atoms(dep_root,
5343                                                 dep_string, myuse=myuse, parent=pkg, strict=strict,
5344                                                 priority=dep_priority)
5345                                 except portage.exception.InvalidDependString, e:
5346                                         show_invalid_depstring_notice(jbigkey, dep_string, str(e))
5347                                         return 0
5348                                 if debug:
5349                                         print "Candidates:", selected_atoms
5350
5351                                 for atom in selected_atoms:
5352                                         try:
5353
5354                                                 atom = portage.dep.Atom(atom)
5355
5356                                                 mypriority = dep_priority.copy()
5357                                                 if not atom.blocker and vardb.match(atom):
5358                                                         mypriority.satisfied = True
5359
5360                                                 if not self._add_dep(Dependency(atom=atom,
5361                                                         blocker=atom.blocker, depth=depth, parent=pkg,
5362                                                         priority=mypriority, root=dep_root),
5363                                                         allow_unsatisfied=allow_unsatisfied):
5364                                                         return 0
5365
5366                                         except portage.exception.InvalidAtom, e:
5367                                                 show_invalid_depstring_notice(
5368                                                         pkg, dep_string, str(e))
5369                                                 del e
5370                                                 if not pkg.installed:
5371                                                         return 0
5372
5373                                 if debug:
5374                                         print "Exiting...", jbigkey
5375                 except portage.exception.AmbiguousPackageName, e:
5376                         pkgs = e.args[0]
5377                         portage.writemsg("\n\n!!! An atom in the dependencies " + \
5378                                 "is not fully-qualified. Multiple matches:\n\n", noiselevel=-1)
5379                         for cpv in pkgs:
5380                                 portage.writemsg("    %s\n" % cpv, noiselevel=-1)
5381                         portage.writemsg("\n", noiselevel=-1)
5382                         if mytype == "binary":
5383                                 portage.writemsg(
5384                                         "!!! This binary package cannot be installed: '%s'\n" % \
5385                                         mykey, noiselevel=-1)
5386                         elif mytype == "ebuild":
5387                                 portdb = self.roots[myroot].trees["porttree"].dbapi
5388                                 myebuild, mylocation = portdb.findname2(mykey)
5389                                 portage.writemsg("!!! This ebuild cannot be installed: " + \
5390                                         "'%s'\n" % myebuild, noiselevel=-1)
5391                         portage.writemsg("!!! Please notify the package maintainer " + \
5392                                 "that atoms must be fully-qualified.\n", noiselevel=-1)
5393                         return 0
5394                 return 1
5395
5396         def _priority(self, **kwargs):
5397                 if "remove" in self.myparams:
5398                         priority_constructor = UnmergeDepPriority
5399                 else:
5400                         priority_constructor = DepPriority
5401                 return priority_constructor(**kwargs)
5402
5403         def _dep_expand(self, root_config, atom_without_category):
5404                 """
5405                 @param root_config: a root config instance
5406                 @type root_config: RootConfig
5407                 @param atom_without_category: an atom without a category component
5408                 @type atom_without_category: String
5409                 @rtype: list
5410                 @returns: a list of atoms containing categories (possibly empty)
5411                 """
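                     # Illustrative example (hypothetical input): passing "screen" here may
                     # return ["app-misc/screen"] if exactly one category provides that
                     # package name, several atoms if multiple categories match, or an
                     # empty list if nothing matches.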
5412                 null_cp = portage.dep_getkey(insert_category_into_atom(
5413                         atom_without_category, "null"))
5414                 cat, atom_pn = portage.catsplit(null_cp)
5415
5416                 dbs = self._filtered_trees[root_config.root]["dbs"]
5417                 categories = set()
5418                 for db, pkg_type, built, installed, db_keys in dbs:
5419                         for cat in db.categories:
5420                                 if db.cp_list("%s/%s" % (cat, atom_pn)):
5421                                         categories.add(cat)
5422
5423                 deps = []
5424                 for cat in categories:
5425                         deps.append(insert_category_into_atom(
5426                                 atom_without_category, cat))
5427                 return deps
5428
5429         def _have_new_virt(self, root, atom_cp):
5430                 ret = False
5431                 for db, pkg_type, built, installed, db_keys in \
5432                         self._filtered_trees[root]["dbs"]:
5433                         if db.cp_list(atom_cp):
5434                                 ret = True
5435                                 break
5436                 return ret
5437
5438         def _iter_atoms_for_pkg(self, pkg):
5439                 # TODO: add multiple $ROOT support
5440                 if pkg.root != self.target_root:
5441                         return
5442                 atom_arg_map = self._atom_arg_map
5443                 root_config = self.roots[pkg.root]
5444                 for atom in self._set_atoms.iterAtomsForPackage(pkg):
5445                         atom_cp = portage.dep_getkey(atom)
5446                         if atom_cp != pkg.cp and \
5447                                 self._have_new_virt(pkg.root, atom_cp):
5448                                 continue
5449                         visible_pkgs = root_config.visible_pkgs.match_pkgs(atom)
5450                         visible_pkgs.reverse() # descending order
5451                         higher_slot = None
5452                         for visible_pkg in visible_pkgs:
5453                                 if visible_pkg.cp != atom_cp:
5454                                         continue
5455                                 if pkg >= visible_pkg:
5456                                         # This is descending order, and we're not
5457                                         # interested in any versions <= the given pkg.
5458                                         break
5459                                 if pkg.slot_atom != visible_pkg.slot_atom:
5460                                         higher_slot = visible_pkg
5461                                         break
5462                         if higher_slot is not None:
5463                                 continue
5464                         for arg in atom_arg_map[(atom, pkg.root)]:
5465                                 if isinstance(arg, PackageArg) and \
5466                                         arg.package != pkg:
5467                                         continue
5468                                 yield arg, atom
5469
5470         def select_files(self, myfiles):
5471                 """Given a list of .tbz2s, .ebuilds, sets, and deps, create the
5472                 appropriate depgraph and return a favorite list."""
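                     # Illustrative argument forms handled below (hypothetical values):
                     #   "foo-1.0.tbz2"                  a binary package file
                     #   "/path/to/foo/foo-1.0.ebuild"   an ebuild file
                     #   "/usr/bin/foo"                  a path resolved via owner lookup
                     #   "world" or a SETPREFIX-prefixed name   a package set
                     #   "=app-misc/foo-1.0" or "foo"    an atom, with or without category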
5473                 debug = "--debug" in self.myopts
5474                 root_config = self.roots[self.target_root]
5475                 sets = root_config.sets
5476                 getSetAtoms = root_config.setconfig.getSetAtoms
5477                 myfavorites=[]
5478                 myroot = self.target_root
5479                 dbs = self._filtered_trees[myroot]["dbs"]
5480                 vardb = self.trees[myroot]["vartree"].dbapi
5481                 real_vardb = self._trees_orig[myroot]["vartree"].dbapi
5482                 portdb = self.trees[myroot]["porttree"].dbapi
5483                 bindb = self.trees[myroot]["bintree"].dbapi
5484                 pkgsettings = self.pkgsettings[myroot]
5485                 args = []
5486                 onlydeps = "--onlydeps" in self.myopts
5487                 lookup_owners = []
5488                 for x in myfiles:
5489                         ext = os.path.splitext(x)[1]
5490                         if ext==".tbz2":
5491                                 if not os.path.exists(x):
5492                                         if os.path.exists(
5493                                                 os.path.join(pkgsettings["PKGDIR"], "All", x)):
5494                                                 x = os.path.join(pkgsettings["PKGDIR"], "All", x)
5495                                         elif os.path.exists(
5496                                                 os.path.join(pkgsettings["PKGDIR"], x)):
5497                                                 x = os.path.join(pkgsettings["PKGDIR"], x)
5498                                         else:
5499                                                 print "\n\n!!! Binary package '"+str(x)+"' does not exist."
5500                                                 print "!!! Please ensure the tbz2 exists as specified.\n"
5501                                                 return 0, myfavorites
5502                                 mytbz2=portage.xpak.tbz2(x)
5503                                 mykey=mytbz2.getelements("CATEGORY")[0]+"/"+os.path.splitext(os.path.basename(x))[0]
5504                                 if os.path.realpath(x) != \
5505                                         os.path.realpath(self.trees[myroot]["bintree"].getname(mykey)):
5506                                         print colorize("BAD", "\n*** You need to adjust PKGDIR to emerge this package.\n")
5507                                         return 0, myfavorites
5508                                 db_keys = list(bindb._aux_cache_keys)
5509                                 metadata = izip(db_keys, bindb.aux_get(mykey, db_keys))
5510                                 pkg = Package(type_name="binary", root_config=root_config,
5511                                         cpv=mykey, built=True, metadata=metadata,
5512                                         onlydeps=onlydeps)
5513                                 self._pkg_cache[pkg] = pkg
5514                                 args.append(PackageArg(arg=x, package=pkg,
5515                                         root_config=root_config))
5516                         elif ext==".ebuild":
5517                                 ebuild_path = portage.util.normalize_path(os.path.abspath(x))
5518                                 pkgdir = os.path.dirname(ebuild_path)
5519                                 tree_root = os.path.dirname(os.path.dirname(pkgdir))
5520                                 cp = pkgdir[len(tree_root)+1:]
5521                                 e = portage.exception.PackageNotFound(
5522                                         ("%s is not in a valid portage tree " + \
5523                                         "hierarchy or does not exist") % x)
5524                                 if not portage.isvalidatom(cp):
5525                                         raise e
5526                                 cat = portage.catsplit(cp)[0]
5527                                 mykey = cat + "/" + os.path.basename(ebuild_path[:-7])
5528                                 if not portage.isvalidatom("="+mykey):
5529                                         raise e
5530                                 ebuild_path = portdb.findname(mykey)
5531                                 if ebuild_path:
5532                                         if ebuild_path != os.path.join(os.path.realpath(tree_root),
5533                                                 cp, os.path.basename(ebuild_path)):
5534                                                 print colorize("BAD", "\n*** You need to adjust PORTDIR or PORTDIR_OVERLAY to emerge this package.\n")
5535                                                 return 0, myfavorites
5536                                         if mykey not in portdb.xmatch(
5537                                                 "match-visible", portage.dep_getkey(mykey)):
5538                                                 print colorize("BAD", "\n*** You are emerging a masked package. It is MUCH better to use")
5539                                                 print colorize("BAD", "*** /etc/portage/package.* to accomplish this. See portage(5) man")
5540                                                 print colorize("BAD", "*** page for details.")
5541                                                 countdown(int(self.settings["EMERGE_WARNING_DELAY"]),
5542                                                         "Continuing...")
5543                                 else:
5544                                         raise portage.exception.PackageNotFound(
5545                                                 "%s is not in a valid portage tree hierarchy or does not exist" % x)
5546                                 db_keys = list(portdb._aux_cache_keys)
5547                                 metadata = izip(db_keys, portdb.aux_get(mykey, db_keys))
5548                                 pkg = Package(type_name="ebuild", root_config=root_config,
5549                                         cpv=mykey, metadata=metadata, onlydeps=onlydeps)
5550                                 pkgsettings.setcpv(pkg)
5551                                 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
5552                                 pkg.metadata['CHOST'] = pkgsettings.get('CHOST', '')
5553                                 self._pkg_cache[pkg] = pkg
5554                                 args.append(PackageArg(arg=x, package=pkg,
5555                                         root_config=root_config))
5556                         elif x.startswith(os.path.sep):
5557                                 if not x.startswith(myroot):
5558                                         portage.writemsg(("\n\n!!! '%s' does not start with" + \
5559                                                 " $ROOT.\n") % x, noiselevel=-1)
5560                                         return 0, []
5561                                 # Queue these up since it's most efficient to handle
5562                                 # multiple files in a single iter_owners() call.
5563                                 lookup_owners.append(x)
5564                         else:
5565                                 if x in ("system", "world"):
5566                                         x = SETPREFIX + x
5567                                 if x.startswith(SETPREFIX):
5568                                         s = x[len(SETPREFIX):]
5569                                         if s not in sets:
5570                                                 raise portage.exception.PackageSetNotFound(s)
5571                                         if s in self._sets:
5572                                                 continue
5573                                         # Recursively expand sets so that containment tests in
5574                                         # self._get_parent_sets() properly match atoms in nested
5575                                         # sets (like if world contains system).
5576                                         expanded_set = InternalPackageSet(
5577                                                 initial_atoms=getSetAtoms(s))
5578                                         self._sets[s] = expanded_set
5579                                         args.append(SetArg(arg=x, set=expanded_set,
5580                                                 root_config=root_config))
5581                                         continue
5582                                 if not is_valid_package_atom(x):
5583                                         portage.writemsg("\n\n!!! '%s' is not a valid package atom.\n" % x,
5584                                                 noiselevel=-1)
5585                                         portage.writemsg("!!! Please check ebuild(5) for full details.\n")
5586                                         portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
5587                                         return (0,[])
5588                                 # Don't expand categories or old-style virtuals here unless
5589                                 # necessary. Expansion of old-style virtuals here causes at
5590                                 # least the following problems:
5591                                 #   1) It's more difficult to determine which set(s) an atom
5592                                 #      came from, if any.
5593                                 #   2) It takes away freedom from the resolver to choose other
5594                                 #      possible expansions when necessary.
5595                                 if "/" in x:
5596                                         args.append(AtomArg(arg=x, atom=x,
5597                                                 root_config=root_config))
5598                                         continue
5599                                 expanded_atoms = self._dep_expand(root_config, x)
5600                                 installed_cp_set = set()
5601                                 for atom in expanded_atoms:
5602                                         atom_cp = portage.dep_getkey(atom)
5603                                         if vardb.cp_list(atom_cp):
5604                                                 installed_cp_set.add(atom_cp)
5605
5606                                 if len(installed_cp_set) > 1:
5607                                         non_virtual_cps = set()
5608                                         for atom_cp in installed_cp_set:
5609                                                 if not atom_cp.startswith("virtual/"):
5610                                                         non_virtual_cps.add(atom_cp)
5611                                         if len(non_virtual_cps) == 1:
5612                                                 installed_cp_set = non_virtual_cps
5613
5614                                 if len(expanded_atoms) > 1 and len(installed_cp_set) == 1:
5615                                         installed_cp = iter(installed_cp_set).next()
5616                                         expanded_atoms = [atom for atom in expanded_atoms \
5617                                                 if portage.dep_getkey(atom) == installed_cp]
5618
5619                                 if len(expanded_atoms) > 1:
5620                                         print
5621                                         print
5622                                         ambiguous_package_name(x, expanded_atoms, root_config,
5623                                                 self.spinner, self.myopts)
5624                                         return False, myfavorites
5625                                 if expanded_atoms:
5626                                         atom = expanded_atoms[0]
5627                                 else:
5628                                         null_atom = insert_category_into_atom(x, "null")
5629                                         null_cp = portage.dep_getkey(null_atom)
5630                                         cat, atom_pn = portage.catsplit(null_cp)
5631                                         virts_p = root_config.settings.get_virts_p().get(atom_pn)
5632                                         if virts_p:
5633                                                 # Allow the depgraph to choose which virtual.
5634                                                 atom = insert_category_into_atom(x, "virtual")
5635                                         else:
5636                                                 atom = insert_category_into_atom(x, "null")
5637
5638                                 args.append(AtomArg(arg=x, atom=atom,
5639                                         root_config=root_config))
5640
5641                 if lookup_owners:
5642                         relative_paths = []
5643                         search_for_multiple = False
5644                         if len(lookup_owners) > 1:
5645                                 search_for_multiple = True
5646
5647                         for x in lookup_owners:
5648                                 if not search_for_multiple and os.path.isdir(x):
5649                                         search_for_multiple = True
5650                                 relative_paths.append(x[len(myroot):])
5651
5652                         owners = set()
5653                         for pkg, relative_path in \
5654                                 real_vardb._owners.iter_owners(relative_paths):
5655                                 owners.add(pkg.mycpv)
5656                                 if not search_for_multiple:
5657                                         break
5658
5659                         if not owners:
5660                                 portage.writemsg(("\n\n!!! '%s' is not claimed " + \
5661                                         "by any package.\n") % lookup_owners[0], noiselevel=-1)
5662                                 return 0, []
5663
5664                         for cpv in owners:
5665                                 slot = vardb.aux_get(cpv, ["SLOT"])[0]
5666                                 if not slot:
5667                                         # portage now masks packages with missing slot, but it's
5668                                         # possible that one was installed by an older version
5669                                         atom = portage.cpv_getkey(cpv)
5670                                 else:
5671                                         atom = "%s:%s" % (portage.cpv_getkey(cpv), slot)
5672                                 args.append(AtomArg(arg=atom, atom=atom,
5673                                         root_config=root_config))
5674
5675                 if "--update" in self.myopts:
5676                         # In some cases, the greedy slots behavior can pull in a slot that
5677                         # the user would want to uninstall due to it being blocked by a
5678                         # newer version in a different slot. Therefore, it's necessary to
5679                         # detect and discard any that should be uninstalled. Each time
5680                         # that arguments are updated, package selections are repeated in
5681                         # order to ensure consistency with the current arguments:
5682                         #
5683                         #  1) Initialize args
5684                         #  2) Select packages and generate initial greedy atoms
5685                         #  3) Update args with greedy atoms
5686                         #  4) Select packages and generate greedy atoms again, while
5687                         #     accounting for any blockers between selected packages
5688                         #  5) Update args with revised greedy atoms
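                             #
                             # For example (hypothetical atoms): if dev-lang/python:2.4 and
                             # :2.5 are both installed and "python" is an argument, step 2 may
                             # add a greedy :2.4 atom; if the selected :2.5 package blocks the
                             # :2.4 slot, steps 4 and 5 discard that atom again.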
5689
5690                         self._set_args(args)
5691                         greedy_args = []
5692                         for arg in args:
5693                                 greedy_args.append(arg)
5694                                 if not isinstance(arg, AtomArg):
5695                                         continue
5696                                 for atom in self._greedy_slots(arg.root_config, arg.atom):
5697                                         greedy_args.append(
5698                                                 AtomArg(arg=arg.arg, atom=atom,
5699                                                         root_config=arg.root_config))
5700
5701                         self._set_args(greedy_args)
5702                         del greedy_args
5703
5704                         # Revise greedy atoms, accounting for any blockers
5705                         # between selected packages.
5706                         revised_greedy_args = []
5707                         for arg in args:
5708                                 revised_greedy_args.append(arg)
5709                                 if not isinstance(arg, AtomArg):
5710                                         continue
5711                                 for atom in self._greedy_slots(arg.root_config, arg.atom,
5712                                         blocker_lookahead=True):
5713                                         revised_greedy_args.append(
5714                                                 AtomArg(arg=arg.arg, atom=atom,
5715                                                         root_config=arg.root_config))
5716                         args = revised_greedy_args
5717                         del revised_greedy_args
5718
5719                 self._set_args(args)
5720
5721                 myfavorites = set(myfavorites)
5722                 for arg in args:
5723                         if isinstance(arg, (AtomArg, PackageArg)):
5724                                 myfavorites.add(arg.atom)
5725                         elif isinstance(arg, SetArg):
5726                                 myfavorites.add(arg.arg)
5727                 myfavorites = list(myfavorites)
5728
5729                 pprovideddict = pkgsettings.pprovideddict
5730                 if debug:
5731                         portage.writemsg("\n", noiselevel=-1)
5732                 # Order needs to be preserved since a feature of --nodeps
5733                 # is to allow the user to force a specific merge order.
5734                 args.reverse()
5735                 while args:
5736                         arg = args.pop()
5737                         for atom in arg.set:
5738                                 self.spinner.update()
5739                                 dep = Dependency(atom=atom, onlydeps=onlydeps,
5740                                         root=myroot, parent=arg)
5741                                 atom_cp = portage.dep_getkey(atom)
5742                                 try:
5743                                         pprovided = pprovideddict.get(portage.dep_getkey(atom))
5744                                         if pprovided and portage.match_from_list(atom, pprovided):
5745                                                 # A provided package has been specified on the command line.
5746                                                 self._pprovided_args.append((arg, atom))
5747                                                 continue
5748                                         if isinstance(arg, PackageArg):
5749                                                 if not self._add_pkg(arg.package, dep) or \
5750                                                         not self._create_graph():
5751                                                         sys.stderr.write(("\n\n!!! Problem resolving " + \
5752                                                                 "dependencies for %s\n") % arg.arg)
5753                                                         return 0, myfavorites
5754                                                 continue
5755                                         if debug:
5756                                                 portage.writemsg("      Arg: %s\n     Atom: %s\n" % \
5757                                                         (arg, atom), noiselevel=-1)
5758                                         pkg, existing_node = self._select_package(
5759                                                 myroot, atom, onlydeps=onlydeps)
5760                                         if not pkg:
5761                                                 if not (isinstance(arg, SetArg) and \
5762                                                         arg.name in ("system", "world")):
5763                                                         self._unsatisfied_deps_for_display.append(
5764                                                                 ((myroot, atom), {}))
5765                                                         return 0, myfavorites
5766                                                 self._missing_args.append((arg, atom))
5767                                                 continue
5768                                         if atom_cp != pkg.cp:
5769                                                 # For old-style virtuals, we need to repeat the
5770                                                 # package.provided check against the selected package.
5771                                                 expanded_atom = atom.replace(atom_cp, pkg.cp)
5772                                                 pprovided = pprovideddict.get(pkg.cp)
5773                                                 if pprovided and \
5774                                                         portage.match_from_list(expanded_atom, pprovided):
5775                                                         # A provided package has been
5776                                                         # specified on the command line.
5777                                                         self._pprovided_args.append((arg, atom))
5778                                                         continue
5779                                         if pkg.installed and "selective" not in self.myparams:
5780                                                 self._unsatisfied_deps_for_display.append(
5781                                                         ((myroot, atom), {}))
5782                                                 # Previous behavior was to bail out in this case, but
5783                                                 # since the dep is satisfied by the installed package,
5784                                                 # it's more friendly to continue building the graph
5785                                                 # and just show a warning message. Therefore, only bail
5786                                                 # out here if the atom is not from either the system or
5787                                                 # world set.
5788                                                 if not (isinstance(arg, SetArg) and \
5789                                                         arg.name in ("system", "world")):
5790                                                         return 0, myfavorites
5791
5792                                         # Add the selected package to the graph as soon as possible
5793                                         # so that later dep_check() calls can use it as feedback
5794                                         # for making more consistent atom selections.
5795                                         if not self._add_pkg(pkg, dep):
5796                                                 if isinstance(arg, SetArg):
5797                                                         sys.stderr.write(("\n\n!!! Problem resolving " + \
5798                                                                 "dependencies for %s from %s\n") % \
5799                                                                 (atom, arg.arg))
5800                                                 else:
5801                                                         sys.stderr.write(("\n\n!!! Problem resolving " + \
5802                                                                 "dependencies for %s\n") % atom)
5803                                                 return 0, myfavorites
5804
5805                                 except portage.exception.MissingSignature, e:
5806                                         portage.writemsg("\n\n!!! A missing gpg signature is preventing portage from calculating the\n")
5807                                         portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
5808                                         portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
5809                                         portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
5810                                         portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
5811                                         return 0, myfavorites
5812                                 except portage.exception.InvalidSignature, e:
5813                                         portage.writemsg("\n\n!!! An invalid gpg signature is preventing portage from calculating the\n")
5814                                         portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
5815                                         portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
5816                                         portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
5817                                         portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
5818                                         return 0, myfavorites
5819                                 except SystemExit, e:
5820                                         raise # Needed else can't exit
5821                                 except Exception, e:
5822                                         print >> sys.stderr, "\n\n!!! Problem in '%s' dependencies." % atom
5823                                         print >> sys.stderr, "!!!", str(e), getattr(e, "__module__", None)
5824                                         raise
5825
5826                 # Now that the root packages have been added to the graph,
5827                 # process the dependencies.
5828                 if not self._create_graph():
5829                         return 0, myfavorites
5830
5831                 missing=0
5832                 if "--usepkgonly" in self.myopts:
5833                         for xs in self.digraph.all_nodes():
5834                                 if not isinstance(xs, Package):
5835                                         continue
5836                                 if len(xs) >= 4 and xs[0] != "binary" and xs[3] == "merge":
5837                                         if missing == 0:
5838                                                 print
5839                                         missing += 1
5840                                         print "Missing binary for:",xs[2]
5841
5842                 try:
5843                         self.altlist()
5844                 except self._unknown_internal_error:
5845                         return False, myfavorites
5846
5847                 # We return true here unless binaries are missing.
5848                 return (not missing,myfavorites)
5849
5850         def _set_args(self, args):
5851                 """
5852                 Create the "args" package set from atoms and packages given as
5853                 arguments. This method can be called multiple times if necessary.
5854                 The package selection cache is automatically invalidated, since
5855                 arguments influence package selections.
5856                 """
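                     # Called with the initial arguments, and called again with
                     # greedy_args and revised_greedy_args when the "--update"
                     # greedy slot handling above revises the argument list.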
5857                 args_set = self._sets["args"]
5858                 args_set.clear()
5859                 for arg in args:
5860                         if not isinstance(arg, (AtomArg, PackageArg)):
5861                                 continue
5862                         atom = arg.atom
5863                         if atom in args_set:
5864                                 continue
5865                         args_set.add(atom)
5866
5867                 self._set_atoms.clear()
5868                 self._set_atoms.update(chain(*self._sets.itervalues()))
5869                 atom_arg_map = self._atom_arg_map
5870                 atom_arg_map.clear()
5871                 for arg in args:
5872                         for atom in arg.set:
5873                                 atom_key = (atom, arg.root_config.root)
5874                                 refs = atom_arg_map.get(atom_key)
5875                                 if refs is None:
5876                                         refs = []
5877                                         atom_arg_map[atom_key] = refs
5878                                         if arg not in refs:
5879                                                 refs.append(arg)
5880
5881                 # Invalidate the package selection cache, since
5882                 # arguments influence package selections.
5883                 self._highest_pkg_cache.clear()
5884                 for trees in self._filtered_trees.itervalues():
5885                         trees["porttree"].dbapi._clear_cache()
5886
5887         def _greedy_slots(self, root_config, atom, blocker_lookahead=False):
5888                 """
5889                 Return a list of slot atoms corresponding to installed slots that
5890                 differ from the slot of the highest visible match. When
5891                 blocker_lookahead is True, slot atoms that would trigger a blocker
5892                 conflict are automatically discarded, potentially allowing automatic
5893                 uninstallation of older slots when appropriate.
5894                 """
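                     # Illustrative example (hypothetical): if slots 4.1 and 4.3 of a
                     # package are installed and the highest visible match is in slot
                     # 4.3, the 4.1 slot atom is returned, unless blocker_lookahead
                     # finds that one of the two packages blocks the other.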
5895                 highest_pkg, in_graph = self._select_package(root_config.root, atom)
5896                 if highest_pkg is None:
5897                         return []
5898                 vardb = root_config.trees["vartree"].dbapi
5899                 slots = set()
5900                 for cpv in vardb.match(atom):
5901                         # don't mix new virtuals with old virtuals
5902                         if portage.cpv_getkey(cpv) == highest_pkg.cp:
5903                                 slots.add(vardb.aux_get(cpv, ["SLOT"])[0])
5904
5905                 slots.add(highest_pkg.metadata["SLOT"])
5906                 if len(slots) == 1:
5907                         return []
5908                 greedy_pkgs = []
5909                 slots.remove(highest_pkg.metadata["SLOT"])
5910                 while slots:
5911                         slot = slots.pop()
5912                         slot_atom = portage.dep.Atom("%s:%s" % (highest_pkg.cp, slot))
5913                         pkg, in_graph = self._select_package(root_config.root, slot_atom)
5914                         if pkg is not None and \
5915                                 pkg.cp == highest_pkg.cp and pkg < highest_pkg:
5916                                 greedy_pkgs.append(pkg)
5917                 if not greedy_pkgs:
5918                         return []
5919                 if not blocker_lookahead:
5920                         return [pkg.slot_atom for pkg in greedy_pkgs]
5921
5922                 blockers = {}
5923                 blocker_dep_keys = ["DEPEND", "PDEPEND", "RDEPEND"]
5924                 for pkg in greedy_pkgs + [highest_pkg]:
5925                         dep_str = " ".join(pkg.metadata[k] for k in blocker_dep_keys)
5926                         try:
5927                                 atoms = self._select_atoms(
5928                                         pkg.root, dep_str, pkg.use.enabled,
5929                                         parent=pkg, strict=True)
5930                         except portage.exception.InvalidDependString:
5931                                 continue
5932                         blocker_atoms = (x for x in atoms if x.blocker)
5933                         blockers[pkg] = InternalPackageSet(initial_atoms=blocker_atoms)
5934
5935                 if highest_pkg not in blockers:
5936                         return []
5937
5938                 # filter packages with invalid deps
5939                 greedy_pkgs = [pkg for pkg in greedy_pkgs if pkg in blockers]
5940
5941                 # filter packages that conflict with highest_pkg
5942                 greedy_pkgs = [pkg for pkg in greedy_pkgs if not \
5943                         (blockers[highest_pkg].findAtomForPackage(pkg) or \
5944                         blockers[pkg].findAtomForPackage(highest_pkg))]
5945
5946                 if not greedy_pkgs:
5947                         return []
5948
5949                 # If two packages conflict, discard the lower version.
5950                 discard_pkgs = set()
5951                 greedy_pkgs.sort(reverse=True)
5952                 for i in xrange(len(greedy_pkgs) - 1):
5953                         pkg1 = greedy_pkgs[i]
5954                         if pkg1 in discard_pkgs:
5955                                 continue
5956                         for j in xrange(i + 1, len(greedy_pkgs)):
5957                                 pkg2 = greedy_pkgs[j]
5958                                 if pkg2 in discard_pkgs:
5959                                         continue
5960                                 if blockers[pkg1].findAtomForPackage(pkg2) or \
5961                                         blockers[pkg2].findAtomForPackage(pkg1):
5962                                         # pkg1 > pkg2
5963                                         discard_pkgs.add(pkg2)
5964
5965                 return [pkg.slot_atom for pkg in greedy_pkgs \
5966                         if pkg not in discard_pkgs]
5967
5968         def _select_atoms_from_graph(self, *pargs, **kwargs):
5969                 """
5970                 Prefer atoms matching packages that have already been
5971                 added to the graph or those that are installed and have
5972                 not been scheduled for replacement.
5973                 """
5974                 kwargs["trees"] = self._graph_trees
5975                 return self._select_atoms_highest_available(*pargs, **kwargs)
5976
5977         def _select_atoms_highest_available(self, root, depstring,
5978                 myuse=None, parent=None, strict=True, trees=None, priority=None):
5979                 """This will raise InvalidDependString if necessary. If trees is
5980                 None then self._filtered_trees is used."""
5981                 pkgsettings = self.pkgsettings[root]
5982                 if trees is None:
5983                         trees = self._filtered_trees
5984                 if not getattr(priority, "buildtime", False):
5985                         # The parent should only be passed to dep_check() for buildtime
5986                         # dependencies since that's the only case when it's appropriate
5987                         # to trigger the circular dependency avoidance code which uses it.
5988                         # It's important not to trigger the same circular dependency
5989                         # avoidance code for runtime dependencies since it's not needed
5990                         # and it can promote an incorrect package choice.
5991                         parent = None
5992                 if True:
5993                         try:
5994                                 if parent is not None:
5995                                         trees[root]["parent"] = parent
5996                                 if not strict:
5997                                         portage.dep._dep_check_strict = False
5998                                 mycheck = portage.dep_check(depstring, None,
5999                                         pkgsettings, myuse=myuse,
6000                                         myroot=root, trees=trees)
6001                         finally:
6002                                 if parent is not None:
6003                                         trees[root].pop("parent")
6004                                 portage.dep._dep_check_strict = True
6005                         if not mycheck[0]:
6006                                 raise portage.exception.InvalidDependString(mycheck[1])
6007                         selected_atoms = mycheck[1]
6008                 return selected_atoms
6009
6010         def _show_unsatisfied_dep(self, root, atom, myparent=None, arg=None):
6011                 atom = portage.dep.Atom(atom)
6012                 atom_set = InternalPackageSet(initial_atoms=(atom,))
6013                 atom_without_use = atom
6014                 if atom.use:
6015                         atom_without_use = portage.dep.remove_slot(atom)
6016                         if atom.slot:
6017                                 atom_without_use += ":" + atom.slot
6018                         atom_without_use = portage.dep.Atom(atom_without_use)
6019                 xinfo = '"%s"' % atom
6020                 if arg:
6021                         xinfo='"%s"' % arg
6022                 # Discard null/ from failed cpv_expand category expansion.
6023                 xinfo = xinfo.replace("null/", "")
6024                 masked_packages = []
6025                 missing_use = []
6026                 masked_pkg_instances = set()
6027                 missing_licenses = []
6028                 have_eapi_mask = False
6029                 pkgsettings = self.pkgsettings[root]
6030                 implicit_iuse = pkgsettings._get_implicit_iuse()
6031                 root_config = self.roots[root]
6032                 portdb = self.roots[root].trees["porttree"].dbapi
6033                 dbs = self._filtered_trees[root]["dbs"]
6034                 for db, pkg_type, built, installed, db_keys in dbs:
6035                         if installed:
6036                                 continue
6037                         match = db.match
6038                         if hasattr(db, "xmatch"):
6039                                 cpv_list = db.xmatch("match-all", atom_without_use)
6040                         else:
6041                                 cpv_list = db.match(atom_without_use)
6042                         # descending order
6043                         cpv_list.reverse()
6044                         for cpv in cpv_list:
6045                                 metadata, mreasons  = get_mask_info(root_config, cpv,
6046                                         pkgsettings, db, pkg_type, built, installed, db_keys)
6047                                 if metadata is not None:
6048                                         pkg = Package(built=built, cpv=cpv,
6049                                                 installed=installed, metadata=metadata,
6050                                                 root_config=root_config)
6051                                         if pkg.cp != atom.cp:
6052                                                 # A cpv can be returned from dbapi.match() as an
6053                                                 # old-style virtual match even in cases when the
6054                                                 # package does not actually PROVIDE the virtual.
6055                                                 # Filter out any such false matches here.
6056                                                 if not atom_set.findAtomForPackage(pkg):
6057                                                         continue
6058                                         if mreasons:
6059                                                 masked_pkg_instances.add(pkg)
6060                                         if atom.use:
6061                                                 missing_use.append(pkg)
6062                                                 if not mreasons:
6063                                                         continue
6064                                 masked_packages.append(
6065                                         (root_config, pkgsettings, cpv, metadata, mreasons))
6066
6067                 missing_use_reasons = []
6068                 missing_iuse_reasons = []
6069                 for pkg in missing_use:
6070                         use = pkg.use.enabled
6071                         iuse = implicit_iuse.union(re.escape(x) for x in pkg.iuse.all)
6072                         iuse_re = re.compile("^(%s)$" % "|".join(iuse))
6073                         missing_iuse = []
6074                         for x in atom.use.required:
6075                                 if iuse_re.match(x) is None:
6076                                         missing_iuse.append(x)
6077                         mreasons = []
6078                         if missing_iuse:
6079                                 mreasons.append("Missing IUSE: %s" % " ".join(missing_iuse))
6080                                 missing_iuse_reasons.append((pkg, mreasons))
6081                         else:
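                                     # need_enable: flags the atom requires enabled but which the
                                     # package currently lacks; need_disable: flags the atom
                                     # requires disabled but which are currently enabled.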
6082                                 need_enable = sorted(atom.use.enabled.difference(use))
6083                                 need_disable = sorted(atom.use.disabled.intersection(use))
6084                                 if need_enable or need_disable:
6085                                         changes = []
6086                                         changes.extend(colorize("red", "+" + x) \
6087                                                 for x in need_enable)
6088                                         changes.extend(colorize("blue", "-" + x) \
6089                                                 for x in need_disable)
6090                                         mreasons.append("Change USE: %s" % " ".join(changes))
6091                                         missing_use_reasons.append((pkg, mreasons))
6092
6093                 unmasked_use_reasons = [(pkg, mreasons) for (pkg, mreasons) \
6094                         in missing_use_reasons if pkg not in masked_pkg_instances]
6095
6096                 unmasked_iuse_reasons = [(pkg, mreasons) for (pkg, mreasons) \
6097                         in missing_iuse_reasons if pkg not in masked_pkg_instances]
6098
6099                 show_missing_use = False
6100                 if unmasked_use_reasons:
6101                         # Only show the latest version.
6102                         show_missing_use = unmasked_use_reasons[:1]
6103                 elif unmasked_iuse_reasons:
6104                         if missing_use_reasons:
6105                                 # All packages with required IUSE are masked,
6106                                 # so display a normal masking message.
6107                                 pass
6108                         else:
6109                                 show_missing_use = unmasked_iuse_reasons
6110
6111                 if show_missing_use:
6112                         print "\nemerge: there are no ebuilds built with USE flags to satisfy "+green(xinfo)+"."
6113                         print "!!! One of the following packages is required to complete your request:"
6114                         for pkg, mreasons in show_missing_use:
6115                                 print "- "+pkg.cpv+" ("+", ".join(mreasons)+")"
6116
6117                 elif masked_packages:
6118                         print "\n!!! " + \
6119                                 colorize("BAD", "All ebuilds that could satisfy ") + \
6120                                 colorize("INFORM", xinfo) + \
6121                                 colorize("BAD", " have been masked.")
6122                         print "!!! One of the following masked packages is required to complete your request:"
6123                         have_eapi_mask = show_masked_packages(masked_packages)
6124                         if have_eapi_mask:
6125                                 print
6126                                 msg = ("The current version of portage supports " + \
6127                                         "EAPI '%s'. You must upgrade to a newer version" + \
6128                                         " of portage before EAPI masked packages can" + \
6129                                         " be installed.") % portage.const.EAPI
6130                                 from textwrap import wrap
6131                                 for line in wrap(msg, 75):
6132                                         print line
6133                         print
6134                         show_mask_docs()
6135                 else:
6136                         print "\nemerge: there are no ebuilds to satisfy "+green(xinfo)+"."
6137
6138                 # Show parent nodes and the argument that pulled them in.
6139                 traversed_nodes = set()
6140                 node = myparent
6141                 msg = []
6142                 while node is not None:
6143                         traversed_nodes.add(node)
6144                         msg.append('(dependency required by "%s" [%s])' % \
6145                                 (colorize('INFORM', str(node.cpv)), node.type_name))
6146                         # When traversing to parents, prefer arguments over packages
6147                         # since arguments are root nodes. Never traverse the same
6148                         # package twice, in order to prevent an infinite loop.
6149                         selected_parent = None
6150                         for parent in self.digraph.parent_nodes(node):
6151                                 if isinstance(parent, DependencyArg):
6152                                         msg.append('(dependency required by "%s" [argument])' % \
6153                                                 (colorize('INFORM', str(parent))))
6154                                         selected_parent = None
6155                                         break
6156                                 if parent not in traversed_nodes:
6157                                         selected_parent = parent
6158                         node = selected_parent
6159                 for line in msg:
6160                         print line
6161
6162                 print
6163
6164         def _select_pkg_highest_available(self, root, atom, onlydeps=False):
6165                 cache_key = (root, atom, onlydeps)
6166                 ret = self._highest_pkg_cache.get(cache_key)
6167                 if ret is not None:
6168                         pkg, existing = ret
6169                         if pkg and not existing:
6170                                 existing = self._slot_pkg_map[root].get(pkg.slot_atom)
6171                                 if existing and existing == pkg:
6172                                         # Update the cache to reflect that the
6173                                         # package has been added to the graph.
6174                                         ret = pkg, pkg
6175                                         self._highest_pkg_cache[cache_key] = ret
6176                         return ret
6177                 ret = self._select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)
6178                 self._highest_pkg_cache[cache_key] = ret
6179                 pkg, existing = ret
6180                 if pkg is not None:
6181                         settings = pkg.root_config.settings
6182                         if visible(settings, pkg) and not (pkg.installed and \
6183                                 settings._getMissingKeywords(pkg.cpv, pkg.metadata)):
6184                                 pkg.root_config.visible_pkgs.cpv_inject(pkg)
6185                 return ret
6186
6187         def _select_pkg_highest_available_imp(self, root, atom, onlydeps=False):
6188                 root_config = self.roots[root]
6189                 pkgsettings = self.pkgsettings[root]
6190                 dbs = self._filtered_trees[root]["dbs"]
6191                 vardb = self.roots[root].trees["vartree"].dbapi
6192                 portdb = self.roots[root].trees["porttree"].dbapi
6193                 # List of acceptable packages, ordered by type preference.
6194                 matched_packages = []
6195                 highest_version = None
6196                 if not isinstance(atom, portage.dep.Atom):
6197                         atom = portage.dep.Atom(atom)
6198                 atom_cp = atom.cp
6199                 atom_set = InternalPackageSet(initial_atoms=(atom,))
6200                 existing_node = None
6201                 myeb = None
6202                 usepkgonly = "--usepkgonly" in self.myopts
6203                 empty = "empty" in self.myparams
6204                 selective = "selective" in self.myparams
6205                 reinstall = False
6206                 noreplace = "--noreplace" in self.myopts
6207                 # Behavior of the "selective" parameter depends on
6208                 # whether or not a package matches an argument atom.
6209                 # If an installed package provides an old-style
6210                 # virtual that is no longer provided by an available
6211                 # package, the installed package may match an argument
6212                 # atom even though none of the available packages do.
6213                 # Therefore, "selective" logic does not consider
6214                 # whether or not an installed package matches an
6215                 # argument atom. It only considers whether or not
6216                 # available packages match argument atoms, which is
6217                 # represented by the found_available_arg flag.
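                     # (Hypothetical example: an installed package that still PROVIDEs
                     # virtual/x11 may match a "virtual/x11" argument even when no
                     # available package does; found_available_arg then stays False.)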
6218                 found_available_arg = False
6219                 for find_existing_node in True, False:
6220                         if existing_node:
6221                                 break
6222                         for db, pkg_type, built, installed, db_keys in dbs:
6223                                 if existing_node:
6224                                         break
6225                                 if installed and not find_existing_node:
6226                                         want_reinstall = reinstall or empty or \
6227                                                 (found_available_arg and not selective)
6228                                         if want_reinstall and matched_packages:
6229                                                 continue
6230                                 if hasattr(db, "xmatch"):
6231                                         cpv_list = db.xmatch("match-all", atom)
6232                                 else:
6233                                         cpv_list = db.match(atom)
6234
6235                                 # USE=multislot can make an installed package appear as if
6236                                 # it doesn't satisfy a slot dependency. Rebuilding the ebuild
6237                                 # won't do any good as long as USE=multislot is enabled since
6238                                 # the newly built package still won't have the expected slot.
6239                                 # Therefore, assume that such SLOT dependencies are already
6240                                 # satisfied rather than forcing a rebuild.
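                                     # (Hypothetical example: with USE=multislot a toolchain
                                     # package may install into SLOT="4.1.2" rather than "4.1",
                                     # so the installed copy no longer satisfies a :4.1 slot dep
                                     # even though rebuilding would produce the same SLOT.)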
6241                                 if installed and not cpv_list and atom.slot:
6242                                         for cpv in db.match(atom.cp):
6243                                                 slot_available = False
6244                                                 for other_db, other_type, other_built, \
6245                                                         other_installed, other_keys in dbs:
6246                                                         try:
6247                                                                 if atom.slot == \
6248                                                                         other_db.aux_get(cpv, ["SLOT"])[0]:
6249                                                                         slot_available = True
6250                                                                         break
6251                                                         except KeyError:
6252                                                                 pass
6253                                                 if not slot_available:
6254                                                         continue
6255                                                 inst_pkg = self._pkg(cpv, "installed",
6256                                                         root_config, installed=installed)
6257                                                 # Remove the slot from the atom and verify that
6258                                                 # the package matches the resulting atom.
6259                                                 atom_without_slot = portage.dep.remove_slot(atom)
6260                                                 if atom.use:
6261                                                         atom_without_slot += str(atom.use)
6262                                                 atom_without_slot = portage.dep.Atom(atom_without_slot)
6263                                                 if portage.match_from_list(
6264                                                         atom_without_slot, [inst_pkg]):
6265                                                         cpv_list = [inst_pkg.cpv]
6266                                                 break
6267
6268                                 if not cpv_list:
6269                                         continue
6270                                 pkg_status = "merge"
6271                                 if installed or onlydeps:
6272                                         pkg_status = "nomerge"
6273                                 # descending order
6274                                 cpv_list.reverse()
6275                                 for cpv in cpv_list:
6276                                         # Make --noreplace take precedence over --newuse.
6277                                         if not installed and noreplace and \
6278                                                 cpv in vardb.match(atom):
6279                                                 # If the installed version is masked, it may
6280                                                 # be necessary to look at lower versions,
6281                                                 # in case there is a visible downgrade.
6282                                                 continue
6283                                         reinstall_for_flags = None
6284                                         cache_key = (pkg_type, root, cpv, pkg_status)
6285                                         calculated_use = True
6286                                         pkg = self._pkg_cache.get(cache_key)
6287                                         if pkg is None:
6288                                                 calculated_use = False
6289                                                 try:
6290                                                         metadata = izip(db_keys, db.aux_get(cpv, db_keys))
6291                                                 except KeyError:
6292                                                         continue
6293                                                 pkg = Package(built=built, cpv=cpv,
6294                                                         installed=installed, metadata=metadata,
6295                                                         onlydeps=onlydeps, root_config=root_config,
6296                                                         type_name=pkg_type)
6297                                                 metadata = pkg.metadata
6298                                                 if not built:
6299                                                         metadata['CHOST'] = pkgsettings.get('CHOST', '')
6300                                                 if not built and ("?" in metadata["LICENSE"] or \
6301                                                         "?" in metadata["PROVIDE"]):
6302                                                         # This is avoided whenever possible because
6303                                                         # it's expensive. It only needs to be done here
6304                                                         # if it has an effect on visibility.
6305                                                         pkgsettings.setcpv(pkg)
6306                                                         metadata["USE"] = pkgsettings["PORTAGE_USE"]
6307                                                         calculated_use = True
6308                                                 self._pkg_cache[pkg] = pkg
6309
6310                                         if not installed or (built and matched_packages):
6311                                                 # Only enforce visibility on installed packages
6312                                                 # if there is at least one other visible package
6313                                                 # available. By filtering installed masked packages
6314                                                 # here, packages that have been masked since they
6315                                                 # were installed can be automatically downgraded
6316                                                 # to an unmasked version.
6317                                                 try:
6318                                                         if not visible(pkgsettings, pkg):
6319                                                                 continue
6320                                                 except portage.exception.InvalidDependString:
6321                                                         if not installed:
6322                                                                 continue
6323
6324                                                 # Enable upgrade or downgrade to a version
6325                                                 # with visible KEYWORDS when the installed
6326                                                 # version is masked by KEYWORDS, but never
6327                                                 # reinstall the same exact version only due
6328                                                 # to a KEYWORDS mask.
6329                                                 if built and matched_packages:
6330
6331                                                         different_version = None
6332                                                         for avail_pkg in matched_packages:
6333                                                                 if not portage.dep.cpvequal(
6334                                                                         pkg.cpv, avail_pkg.cpv):
6335                                                                         different_version = avail_pkg
6336                                                                         break
6337                                                         if different_version is not None:
6338
6339                                                                 if installed and \
6340                                                                         pkgsettings._getMissingKeywords(
6341                                                                         pkg.cpv, pkg.metadata):
6342                                                                         continue
6343
6344                                                                 # If the ebuild no longer exists or its
6345                                                                 # keywords have been dropped, reject built
6346                                                                 # instances (installed or binary).
6347                                                                 # If --usepkgonly is enabled, assume that
6348                                                                 # the ebuild status should be ignored.
6349                                                                 if not usepkgonly:
6350                                                                         try:
6351                                                                                 pkg_eb = self._pkg(
6352                                                                                         pkg.cpv, "ebuild", root_config)
6353                                                                         except portage.exception.PackageNotFound:
6354                                                                                 continue
6355                                                                         else:
6356                                                                                 if not visible(pkgsettings, pkg_eb):
6357                                                                                         continue
6358
6359                                         if not pkg.built and not calculated_use:
6360                                                 # This is avoided whenever possible because
6361                                                 # it's expensive.
6362                                                 pkgsettings.setcpv(pkg)
6363                                                 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
6364
6365                                         if pkg.cp != atom.cp:
6366                                                 # A cpv can be returned from dbapi.match() as an
6367                                                 # old-style virtual match even in cases when the
6368                                                 # package does not actually PROVIDE the virtual.
6369                                                 # Filter out any such false matches here.
6370                                                 if not atom_set.findAtomForPackage(pkg):
6371                                                         continue
6372
6373                                         myarg = None
6374                                         if root == self.target_root:
6375                                                 try:
6376                                                         # Ebuild USE must have been calculated prior
6377                                                         # to this point, in case atoms have USE deps.
6378                                                         myarg = self._iter_atoms_for_pkg(pkg).next()
6379                                                 except StopIteration:
6380                                                         pass
6381                                                 except portage.exception.InvalidDependString:
6382                                                         if not installed:
6383                                                                 # masked by corruption
6384                                                                 continue
6385                                         if not installed and myarg:
6386                                                 found_available_arg = True
6387
6388                                         if atom.use and not pkg.built:
6389                                                 use = pkg.use.enabled
6390                                                 if atom.use.enabled.difference(use):
6391                                                         continue
6392                                                 if atom.use.disabled.intersection(use):
6393                                                         continue
6394                                         if pkg.cp == atom_cp:
6395                                                 if highest_version is None:
6396                                                         highest_version = pkg
6397                                                 elif pkg > highest_version:
6398                                                         highest_version = pkg
6399                                         # At this point, we've found the highest visible
6400                                         # match from the current repo. Any lower versions
6401                                         # from this repo are ignored, so this loop
6402                                         # will always end with a break statement below
6403                                         # this point.
6404                                         if find_existing_node:
6405                                                 e_pkg = self._slot_pkg_map[root].get(pkg.slot_atom)
6406                                                 if not e_pkg:
6407                                                         break
6408                                                 if portage.dep.match_from_list(atom, [e_pkg]):
6409                                                         if highest_version and \
6410                                                                 e_pkg.cp == atom_cp and \
6411                                                                 e_pkg < highest_version and \
6412                                                                 e_pkg.slot_atom != highest_version.slot_atom:
6413                                                                 # There is a higher version available in a
6414                                                                 # different slot, so this existing node is
6415                                                                 # irrelevant.
6416                                                                 pass
6417                                                         else:
6418                                                                 matched_packages.append(e_pkg)
6419                                                                 existing_node = e_pkg
6420                                                 break
6421                                         # Compare built package to current config and
6422                                         # reject the built package if necessary.
6423                                         if built and not installed and \
6424                                                 ("--newuse" in self.myopts or \
6425                                                 "--reinstall" in self.myopts):
6426                                                 iuses = pkg.iuse.all
6427                                                 old_use = pkg.use.enabled
6428                                                 if myeb:
6429                                                         pkgsettings.setcpv(myeb)
6430                                                 else:
6431                                                         pkgsettings.setcpv(pkg)
6432                                                 now_use = pkgsettings["PORTAGE_USE"].split()
6433                                                 forced_flags = set()
6434                                                 forced_flags.update(pkgsettings.useforce)
6435                                                 forced_flags.update(pkgsettings.usemask)
6436                                                 cur_iuse = iuses
6437                                                 if myeb and not usepkgonly:
6438                                                         cur_iuse = myeb.iuse.all
6439                                                 if self._reinstall_for_flags(forced_flags,
6440                                                         old_use, iuses,
6441                                                         now_use, cur_iuse):
6442                                                         break
6443                                         # Compare current config to installed package
6444                                         # and do not reinstall if possible.
6445                                         if not installed and \
6446                                                 ("--newuse" in self.myopts or \
6447                                                 "--reinstall" in self.myopts) and \
6448                                                 cpv in vardb.match(atom):
6449                                                 pkgsettings.setcpv(pkg)
6450                                                 forced_flags = set()
6451                                                 forced_flags.update(pkgsettings.useforce)
6452                                                 forced_flags.update(pkgsettings.usemask)
6453                                                 old_use = vardb.aux_get(cpv, ["USE"])[0].split()
6454                                                 old_iuse = set(filter_iuse_defaults(
6455                                                         vardb.aux_get(cpv, ["IUSE"])[0].split()))
6456                                                 cur_use = pkg.use.enabled
6457                                                 cur_iuse = pkg.iuse.all
6458                                                 reinstall_for_flags = \
6459                                                         self._reinstall_for_flags(
6460                                                         forced_flags, old_use, old_iuse,
6461                                                         cur_use, cur_iuse)
6462                                                 if reinstall_for_flags:
6463                                                         reinstall = True
6464                                         if not built:
6465                                                 myeb = pkg
6466                                         matched_packages.append(pkg)
6467                                         if reinstall_for_flags:
6468                                                 self._reinstall_nodes[pkg] = \
6469                                                         reinstall_for_flags
6470                                         break
6471
6472                 if not matched_packages:
6473                         return None, None
6474
6475                 if "--debug" in self.myopts:
6476                         for pkg in matched_packages:
6477                                 portage.writemsg("%s %s\n" % \
6478                                         ((pkg.type_name + ":").rjust(10), pkg.cpv), noiselevel=-1)
6479
6480                 # Filter out any old-style virtual matches if they are
6481                 # mixed with new-style virtual matches.
6482                 cp = portage.dep_getkey(atom)
6483                 if len(matched_packages) > 1 and \
6484                         "virtual" == portage.catsplit(cp)[0]:
6485                         for pkg in matched_packages:
6486                                 if pkg.cp != cp:
6487                                         continue
6488                                 # Got a new-style virtual, so filter
6489                                 # out any old-style virtuals.
6490                                 matched_packages = [pkg for pkg in matched_packages \
6491                                         if pkg.cp == cp]
6492                                 break
6493
6494                 if len(matched_packages) > 1:
6495                         bestmatch = portage.best(
6496                                 [pkg.cpv for pkg in matched_packages])
6497                         matched_packages = [pkg for pkg in matched_packages \
6498                                 if portage.dep.cpvequal(pkg.cpv, bestmatch)]
6499
6500                 # ordered by type preference ("ebuild" type is the last resort)
6501                 return  matched_packages[-1], existing_node
6502
6503         def _select_pkg_from_graph(self, root, atom, onlydeps=False):
6504                 """
6505                 Select packages that have already been added to the graph or
6506                 those that are installed and have not been scheduled for
6507                 replacement.
6508                 """
6509                 graph_db = self._graph_trees[root]["porttree"].dbapi
6510                 matches = graph_db.match_pkgs(atom)
6511                 if not matches:
6512                         return None, None
6513                 pkg = matches[-1] # highest match
6514                 in_graph = self._slot_pkg_map[root].get(pkg.slot_atom)
6515                 return pkg, in_graph
6516
6517         def _complete_graph(self):
6518                 """
6519                 Add any deep dependencies of required sets (args, system, world) that
6520                 have not been pulled into the graph yet. This ensures that the graph
6521                 is consistent such that initially satisfied deep dependencies are not
6522                 broken in the new graph. Initially unsatisfied dependencies are
6523                 irrelevant since we only want to avoid breaking dependencies that are
6524                 initially satisfied.
6525
6526                 Since this method can consume enough time to disturb users, it is
6527                 currently only enabled by the --complete-graph option.
6528                 """
6529                 if "--buildpkgonly" in self.myopts or \
6530                         "recurse" not in self.myparams:
6531                         return 1
6532
6533                 if "complete" not in self.myparams:
6534                         # Skip this to avoid consuming enough time to disturb users.
6535                         return 1
6536
6537                 # Put the depgraph into a mode that causes it to only
6538                 # select packages that have already been added to the
6539                 # graph or those that are installed and have not been
6540                 # scheduled for replacement. Also, toggle the "deep"
6541                 # parameter so that all dependencies are traversed and
6542                 # accounted for.
6543                 self._select_atoms = self._select_atoms_from_graph
6544                 self._select_package = self._select_pkg_from_graph
6545                 already_deep = "deep" in self.myparams
6546                 if not already_deep:
6547                         self.myparams.add("deep")
6548
6549                 for root in self.roots:
6550                         required_set_names = self._required_set_names.copy()
6551                         if root == self.target_root and \
6552                                 (already_deep or "empty" in self.myparams):
6553                                 required_set_names.difference_update(self._sets)
6554                         if not required_set_names and not self._ignored_deps:
6555                                 continue
6556                         root_config = self.roots[root]
6557                         setconfig = root_config.setconfig
6558                         args = []
6559                         # Reuse existing SetArg instances when available.
6560                         for arg in self.digraph.root_nodes():
6561                                 if not isinstance(arg, SetArg):
6562                                         continue
6563                                 if arg.root_config != root_config:
6564                                         continue
6565                                 if arg.name in required_set_names:
6566                                         args.append(arg)
6567                                         required_set_names.remove(arg.name)
6568                         # Create new SetArg instances only when necessary.
6569                         for s in required_set_names:
6570                                 expanded_set = InternalPackageSet(
6571                                         initial_atoms=setconfig.getSetAtoms(s))
6572                                 atom = SETPREFIX + s
6573                                 args.append(SetArg(arg=atom, set=expanded_set,
6574                                         root_config=root_config))
6575                         vardb = root_config.trees["vartree"].dbapi
6576                         for arg in args:
6577                                 for atom in arg.set:
6578                                         self._dep_stack.append(
6579                                                 Dependency(atom=atom, root=root, parent=arg))
6580                         if self._ignored_deps:
6581                                 self._dep_stack.extend(self._ignored_deps)
6582                                 self._ignored_deps = []
6583                         if not self._create_graph(allow_unsatisfied=True):
6584                                 return 0
6585                         # Check the unsatisfied deps to see if any initially satisfied deps
6586                         # will become unsatisfied due to an upgrade. Initially unsatisfied
6587                         # deps are irrelevant since we only want to avoid breaking deps
6588                         # that are initially satisfied.
6589                         while self._unsatisfied_deps:
6590                                 dep = self._unsatisfied_deps.pop()
6591                                 matches = vardb.match_pkgs(dep.atom)
6592                                 if not matches:
6593                                         self._initially_unsatisfied_deps.append(dep)
6594                                         continue
6595                                 # A scheduled installation broke a deep dependency.
6596                                 # Add the installed package to the graph so that it
6597                                 # will be appropriately reported as a slot collision
6598                                 # (possibly solvable via backtracking).
6599                                 pkg = matches[-1] # highest match
6600                                 if not self._add_pkg(pkg, dep):
6601                                         return 0
6602                                 if not self._create_graph(allow_unsatisfied=True):
6603                                         return 0
6604                 return 1
6605
6606         def _pkg(self, cpv, type_name, root_config, installed=False):
6607                 """
6608                 Get a package instance from the cache, or create a new
6609                 one if necessary. Raises PackageNotFound if aux_get
6610                 fails for some reason (package does not exist or is
6611                 corrupt).
6612                 """
6613                 operation = "merge"
6614                 if installed:
6615                         operation = "nomerge"
6616                 pkg = self._pkg_cache.get(
6617                         (type_name, root_config.root, cpv, operation))
6618                 if pkg is None:
6619                         tree_type = self.pkg_tree_map[type_name]
6620                         db = root_config.trees[tree_type].dbapi
6621                         db_keys = list(self._trees_orig[root_config.root][
6622                                 tree_type].dbapi._aux_cache_keys)
6623                         try:
6624                                 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
6625                         except KeyError:
6626                                 raise portage.exception.PackageNotFound(cpv)
6627                         pkg = Package(cpv=cpv, metadata=metadata,
6628                                 root_config=root_config, installed=installed)
6629                         if type_name == "ebuild":
6630                                 settings = self.pkgsettings[root_config.root]
6631                                 settings.setcpv(pkg)
6632                                 pkg.metadata["USE"] = settings["PORTAGE_USE"]
6633                                 pkg.metadata['CHOST'] = settings.get('CHOST', '')
6634                         self._pkg_cache[pkg] = pkg
6635                 return pkg
6636
6637         def validate_blockers(self):
6638                 """Remove any blockers from the digraph that do not match any of the
6639                 packages within the graph.  If necessary, create hard deps to ensure
6640                 correct merge order such that mutually blocking packages are never
6641                 installed simultaneously."""
6642
6643                 if "--buildpkgonly" in self.myopts or \
6644                         "--nodeps" in self.myopts:
6645                         return True
6646
6647                 #if "deep" in self.myparams:
6648                 if True:
6649                         # Pull in blockers from all installed packages that haven't already
6650                         # been pulled into the depgraph.  This is not enabled by default
6651                         # due to the performance penalty that is incurred by all the
6652                         # additional dep_check calls that are required.
6653
6654                         dep_keys = ["DEPEND","RDEPEND","PDEPEND"]
6655                         for myroot in self.trees:
6656                                 vardb = self.trees[myroot]["vartree"].dbapi
6657                                 portdb = self.trees[myroot]["porttree"].dbapi
6658                                 pkgsettings = self.pkgsettings[myroot]
6659                                 final_db = self.mydbapi[myroot]
6660
6661                                 blocker_cache = BlockerCache(myroot, vardb)
6662                                 stale_cache = set(blocker_cache)
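                                     # Cache entries that remain in stale_cache after the
                                     # loop below correspond to packages that are no longer
                                     # installed, so they are pruned from the blocker cache
                                     # afterwards.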
6663                                 for pkg in vardb:
6664                                         cpv = pkg.cpv
6665                                         stale_cache.discard(cpv)
6666                                         pkg_in_graph = self.digraph.contains(pkg)
6667
6668                                         # Check for masked installed packages. Only warn about
6669                                         # packages that are in the graph in order to avoid warning
6670                                         # about those that will be automatically uninstalled during
6671                                         # the merge process or by --depclean.
6672                                         if pkg in final_db:
6673                                                 if pkg_in_graph and not visible(pkgsettings, pkg):
6674                                                         self._masked_installed.add(pkg)
6675
6676                                         blocker_atoms = None
6677                                         blockers = None
6678                                         if pkg_in_graph:
6679                                                 blockers = []
6680                                                 try:
6681                                                         blockers.extend(
6682                                                                 self._blocker_parents.child_nodes(pkg))
6683                                                 except KeyError:
6684                                                         pass
6685                                                 try:
6686                                                         blockers.extend(
6687                                                                 self._irrelevant_blockers.child_nodes(pkg))
6688                                                 except KeyError:
6689                                                         pass
6690                                         if blockers is not None:
6691                                                 blockers = set(str(blocker.atom) \
6692                                                         for blocker in blockers)
6693
6694                                         # If this node has any blockers, create a "nomerge"
6695                                         # node for it so that they can be enforced.
6696                                         self.spinner.update()
6697                                         blocker_data = blocker_cache.get(cpv)
6698                                         if blocker_data is not None and \
6699                                                 blocker_data.counter != long(pkg.metadata["COUNTER"]):
6700                                                 blocker_data = None
6701
6702                                         # If blocker data from the graph is available, use
6703                                         # it to validate the cache and update the cache if
6704                                         # it seems invalid.
6705                                         if blocker_data is not None and \
6706                                                 blockers is not None:
6707                                                 if not blockers.symmetric_difference(
6708                                                         blocker_data.atoms):
6709                                                         continue
6710                                                 blocker_data = None
6711
6712                                         if blocker_data is None and \
6713                                                 blockers is not None:
6714                                                 # Re-use the blockers from the graph.
6715                                                 blocker_atoms = sorted(blockers)
6716                                                 counter = long(pkg.metadata["COUNTER"])
6717                                                 blocker_data = \
6718                                                         blocker_cache.BlockerData(counter, blocker_atoms)
6719                                                 blocker_cache[pkg.cpv] = blocker_data
6720                                                 continue
6721
6722                                         if blocker_data:
6723                                                 blocker_atoms = blocker_data.atoms
6724                                         else:
6725                                                 # Use aux_get() to trigger FakeVartree global
6726                                                 # updates on *DEPEND when appropriate.
6727                                                 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
6728                                                 # It is crucial to pass in final_db here in order to
6729                                                 # optimize dep_check calls by eliminating atoms via
6730                                                 # dep_wordreduce and dep_eval calls.
6731                                                 try:
6732                                                         portage.dep._dep_check_strict = False
6733                                                         try:
6734                                                                 success, atoms = portage.dep_check(depstr,
6735                                                                         final_db, pkgsettings, myuse=pkg.use.enabled,
6736                                                                         trees=self._graph_trees, myroot=myroot)
6737                                                         except Exception, e:
6738                                                                 if isinstance(e, SystemExit):
6739                                                                         raise
6740                                                                 # This is helpful, for example, if a ValueError
6741                                                                 # is thrown from cpv_expand due to multiple
6742                                                                 # matches (this can happen if an atom lacks a
6743                                                                 # category).
6744                                                                 show_invalid_depstring_notice(
6745                                                                         pkg, depstr, str(e))
6746                                                                 del e
6747                                                                 raise
6748                                                 finally:
6749                                                         portage.dep._dep_check_strict = True
6750                                                 if not success:
6751                                                         replacement_pkg = final_db.match_pkgs(pkg.slot_atom)
6752                                                         if replacement_pkg and \
6753                                                                 replacement_pkg[0].operation == "merge":
6754                                                                 # This package is being replaced anyway, so
6755                                                                 # ignore invalid dependencies so as not to
6756                                                                 # annoy the user too much (otherwise they'd be
6757                                                                 # forced to manually unmerge it first).
6758                                                                 continue
6759                                                         show_invalid_depstring_notice(pkg, depstr, atoms)
6760                                                         return False
6761                                                 blocker_atoms = [myatom for myatom in atoms \
6762                                                         if myatom.startswith("!")]
6763                                                 blocker_atoms.sort()
6764                                                 counter = long(pkg.metadata["COUNTER"])
6765                                                 blocker_cache[cpv] = \
6766                                                         blocker_cache.BlockerData(counter, blocker_atoms)
6767                                         if blocker_atoms:
6768                                                 try:
6769                                                         for atom in blocker_atoms:
6770                                                                 blocker = Blocker(atom=portage.dep.Atom(atom),
6771                                                                         eapi=pkg.metadata["EAPI"], root=myroot)
6772                                                                 self._blocker_parents.add(blocker, pkg)
6773                                                 except portage.exception.InvalidAtom, e:
6774                                                         depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
6775                                                         show_invalid_depstring_notice(
6776                                                                 pkg, depstr, "Invalid Atom: %s" % (e,))
6777                                                         return False
6778                                 for cpv in stale_cache:
6779                                         del blocker_cache[cpv]
6780                                 blocker_cache.flush()
6781                                 del blocker_cache
6782
6783                 # Discard any "uninstall" tasks scheduled by previous calls
6784                 # to this method, since those tasks may not make sense given
6785                 # the current graph state.
6786                 previous_uninstall_tasks = self._blocker_uninstalls.leaf_nodes()
6787                 if previous_uninstall_tasks:
6788                         self._blocker_uninstalls = digraph()
6789                         self.digraph.difference_update(previous_uninstall_tasks)
6790
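                     # Determine how each remaining blocker can be resolved for
                     # each of its parent packages: either the blocked package can
                     # be replaced or unmerged, or the block is flagged as
                     # unresolvable.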
6791                 for blocker in self._blocker_parents.leaf_nodes():
6792                         self.spinner.update()
6793                         root_config = self.roots[blocker.root]
6794                         virtuals = root_config.settings.getvirtuals()
6795                         myroot = blocker.root
6796                         initial_db = self.trees[myroot]["vartree"].dbapi
6797                         final_db = self.mydbapi[myroot]
6798
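                             # If the blocker atom refers to a virtual for which
                             # _have_new_virt() is false, expand it into one atom per
                             # PROVIDE entry so that the actual providers can be matched.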
6799                         provider_virtual = False
6800                         if blocker.cp in virtuals and \
6801                                 not self._have_new_virt(blocker.root, blocker.cp):
6802                                 provider_virtual = True
6803
6804                         # Use this to check PROVIDE for each matched package
6805                         # when necessary.
6806                         atom_set = InternalPackageSet(
6807                                 initial_atoms=[blocker.atom])
6808
6809                         if provider_virtual:
6810                                 atoms = []
6811                                 for provider_entry in virtuals[blocker.cp]:
6812                                         provider_cp = \
6813                                                 portage.dep_getkey(provider_entry)
6814                                         atoms.append(blocker.atom.replace(
6815                                                 blocker.cp, provider_cp))
6816                         else:
6817                                 atoms = [blocker.atom]
6818
6819                         blocked_initial = set()
6820                         for atom in atoms:
6821                                 for pkg in initial_db.match_pkgs(atom):
6822                                         if atom_set.findAtomForPackage(pkg):
6823                                                 blocked_initial.add(pkg)
6824
6825                         blocked_final = set()
6826                         for atom in atoms:
6827                                 for pkg in final_db.match_pkgs(atom):
6828                                         if atom_set.findAtomForPackage(pkg):
6829                                                 blocked_final.add(pkg)
6830
6831                         if not blocked_initial and not blocked_final:
6832                                 parent_pkgs = self._blocker_parents.parent_nodes(blocker)
6833                                 self._blocker_parents.remove(blocker)
6834                                 # Discard any parents that don't have any more blockers.
6835                                 for pkg in parent_pkgs:
6836                                         self._irrelevant_blockers.add(blocker, pkg)
6837                                         if not self._blocker_parents.child_nodes(pkg):
6838                                                 self._blocker_parents.remove(pkg)
6839                                 continue
6840                         for parent in self._blocker_parents.parent_nodes(blocker):
6841                                 unresolved_blocks = False
6842                                 depends_on_order = set()
6843                                 for pkg in blocked_initial:
6844                                         if pkg.slot_atom == parent.slot_atom:
6845                                                 # TODO: Support blocks within slots in cases where it
6846                                                 # might make sense.  For example, a new version might
6847                                                 # require that the old version be uninstalled at build
6848                                                 # time.
6849                                                 continue
6850                                         if parent.installed:
6851                                                 # Two currently installed packages conflict with
6852                                                 # each other. Ignore this case since the damage
6853                                                 # is already done and this would be likely to
6854                                                 # confuse users if displayed like a normal blocker.
6855                                                 continue
6856
6857                                         self._blocked_pkgs.add(pkg, blocker)
6858
6859                                         if parent.operation == "merge":
6860                                                 # Maybe the blocked package can be replaced or simply
6861                                                 # unmerged to resolve this block.
6862                                                 depends_on_order.add((pkg, parent))
6863                                                 continue
6864                                         # None of the above blocker resolution techniques apply,
6865                                         # so apparently this one is unresolvable.
6866                                         unresolved_blocks = True
6867                                 for pkg in blocked_final:
6868                                         if pkg.slot_atom == parent.slot_atom:
6869                                                 # TODO: Support blocks within slots.
6870                                                 continue
6871                                         if parent.operation == "nomerge" and \
6872                                                 pkg.operation == "nomerge":
6873                                                 # This blocker will be handled the next time that a
6874                                                 # merge of either package is triggered.
6875                                                 continue
6876
6877                                         self._blocked_pkgs.add(pkg, blocker)
6878
6879                                         # Maybe the blocking package can be
6880                                         # unmerged to resolve this block.
6881                                         if parent.operation == "merge" and pkg.installed:
6882                                                 depends_on_order.add((pkg, parent))
6883                                                 continue
6884                                         elif parent.operation == "nomerge":
6885                                                 depends_on_order.add((parent, pkg))
6886                                                 continue
6887                                         # None of the above blocker resolution techniques apply,
6888                                         # so apparently this one is unresolvable.
6889                                         unresolved_blocks = True
6890
6891                                 # Make sure we don't unmerge any packages that have been pulled
6892                                 # into the graph.
6893                                 if not unresolved_blocks and depends_on_order:
6894                                         for inst_pkg, inst_task in depends_on_order:
6895                                                 if self.digraph.contains(inst_pkg) and \
6896                                                         self.digraph.parent_nodes(inst_pkg):
6897                                                         unresolved_blocks = True
6898                                                         break
6899
6900                                 if not unresolved_blocks and depends_on_order:
6901                                         for inst_pkg, inst_task in depends_on_order:
6902                                                 uninst_task = Package(built=inst_pkg.built,
6903                                                         cpv=inst_pkg.cpv, installed=inst_pkg.installed,
6904                                                         metadata=inst_pkg.metadata,
6905                                                         operation="uninstall",
6906                                                         root_config=inst_pkg.root_config,
6907                                                         type_name=inst_pkg.type_name)
6908                                                 self._pkg_cache[uninst_task] = uninst_task
6909                                                 # Enforce correct merge order with a hard dep.
6910                                                 self.digraph.addnode(uninst_task, inst_task,
6911                                                         priority=BlockerDepPriority.instance)
6912                                                 # Count references to this blocker so that it can be
6913                                                 # invalidated after nodes referencing it have been
6914                                                 # merged.
6915                                                 self._blocker_uninstalls.addnode(uninst_task, blocker)
6916                                 if not unresolved_blocks and not depends_on_order:
6917                                         self._irrelevant_blockers.add(blocker, parent)
6918                                         self._blocker_parents.remove_edge(blocker, parent)
6919                                         if not self._blocker_parents.parent_nodes(blocker):
6920                                                 self._blocker_parents.remove(blocker)
6921                                         if not self._blocker_parents.child_nodes(parent):
6922                                                 self._blocker_parents.remove(parent)
6923                                 if unresolved_blocks:
6924                                         self._unsolvable_blockers.add(blocker, parent)
6925
6926                 return True
6927
6928         def _accept_blocker_conflicts(self):
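                     # Unresolved blocker conflicts are tolerated in modes where no
                     # packages will actually be merged to the live filesystem, or
                     # when dependencies are ignored entirely.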
6929                 acceptable = False
6930                 for x in ("--buildpkgonly", "--fetchonly",
6931                         "--fetch-all-uri", "--nodeps"):
6932                         if x in self.myopts:
6933                                 acceptable = True
6934                                 break
6935                 return acceptable
6936
6937         def _merge_order_bias(self, mygraph):
6938                 """
6939                 For optimal leaf node selection, promote deep system runtime deps and
6940                 order nodes from highest to lowest overall reference count.
6941                 """
6942
6943                 node_info = {}
6944                 for node in mygraph.order:
6945                         node_info[node] = len(mygraph.parent_nodes(node))
6946                 deep_system_deps = _find_deep_system_runtime_deps(mygraph)
6947
6948                 def cmp_merge_preference(node1, node2):
6949
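                             # Sort order: uninstall operations last, deep system
                             # runtime deps first, and otherwise prefer nodes with a
                             # higher overall reference count.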
6950                         if node1.operation == 'uninstall':
6951                                 if node2.operation == 'uninstall':
6952                                         return 0
6953                                 return 1
6954
6955                         if node2.operation == 'uninstall':
6956                                 if node1.operation == 'uninstall':
6957                                         return 0
6958                                 return -1
6959
6960                         node1_sys = node1 in deep_system_deps
6961                         node2_sys = node2 in deep_system_deps
6962                         if node1_sys != node2_sys:
6963                                 if node1_sys:
6964                                         return -1
6965                                 return 1
6966
6967                         return node_info[node2] - node_info[node1]
6968
6969                 mygraph.order.sort(key=cmp_sort_key(cmp_merge_preference))
6970
6971         def altlist(self, reversed=False):
6972
6973                 while self._serialized_tasks_cache is None:
6974                         self._resolve_conflicts()
6975                         try:
6976                                 self._serialized_tasks_cache, self._scheduler_graph = \
6977                                         self._serialize_tasks()
6978                         except self._serialize_tasks_retry:
6979                                 pass
6980
6981                 retlist = self._serialized_tasks_cache[:]
6982                 if reversed:
6983                         retlist.reverse()
6984                 return retlist
6985
6986         def schedulerGraph(self):
6987                 """
6988                 The scheduler graph is identical to the normal one except that
6989                 uninstall edges are reversed in specific cases that require
6990                 conflicting packages to be temporarily installed simultaneously.
6991                 This is intended for use by the Scheduler in its parallelization
6992                 logic. It ensures that temporary simultaneous installation of
6993                 conflicting packages is avoided when appropriate (especially for
6994                 !!atom blockers), but allowed in specific cases that require it.
6995
6996                 Note that this method calls break_refs() which alters the state of
6997                 internal Package instances such that this depgraph instance should
6998                 not be used to perform any more calculations.
6999                 """
7000                 if self._scheduler_graph is None:
7001                         self.altlist()
7002                 self.break_refs(self._scheduler_graph.order)
7003                 return self._scheduler_graph
7004
7005         def break_refs(self, nodes):
7006                 """
7007                 Take a mergelist like that returned from self.altlist() and
7008                 break any references that lead back to the depgraph. This is
7009                 useful if you want to hold references to packages without
7010                 also holding the depgraph on the heap.
7011                 """
7012                 for node in nodes:
7013                         if hasattr(node, "root_config"):
7014                                 # The FakeVartree references the _package_cache which
7015                                 # references the depgraph. So that Package instances don't
7016                                 # hold the depgraph and FakeVartree on the heap, replace
7017                                 # the RootConfig that references the FakeVartree with the
7018                                 # original RootConfig instance which references the actual
7019                                 # vartree.
7020                                 node.root_config = \
7021                                         self._trees_orig[node.root_config.root]["root_config"]
7022
7023         def _resolve_conflicts(self):
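                     # Complete the graph and validate blockers before attempting to
                     # work out any slot conflicts that have been detected.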
7024                 if not self._complete_graph():
7025                         raise self._unknown_internal_error()
7026
7027                 if not self.validate_blockers():
7028                         raise self._unknown_internal_error()
7029
7030                 if self._slot_collision_info:
7031                         self._process_slot_conflicts()
7032
7033         def _serialize_tasks(self):
7034
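                     # Flatten the dependency graph into an ordered list of merge and
                     # uninstall tasks, and build the corresponding scheduler graph.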
7035                 if "--debug" in self.myopts:
7036                         writemsg("\ndigraph:\n\n", noiselevel=-1)
7037                         self.digraph.debug_print()
7038                         writemsg("\n", noiselevel=-1)
7039
7040                 scheduler_graph = self.digraph.copy()
7041                 mygraph=self.digraph.copy()
7042                 # Prune "nomerge" root nodes if nothing depends on them, since
7043                 # otherwise they slow down merge order calculation. Don't remove
7044                 # non-root nodes since they help optimize merge order in some cases
7045                 # such as revdep-rebuild.
7046                 removed_nodes = set()
7047                 while True:
7048                         for node in mygraph.root_nodes():
7049                                 if not isinstance(node, Package) or \
7050                                         node.installed or node.onlydeps:
7051                                         removed_nodes.add(node)
7052                         if removed_nodes:
7053                                 self.spinner.update()
7054                                 mygraph.difference_update(removed_nodes)
7055                         if not removed_nodes:
7056                                 break
7057                         removed_nodes.clear()
7058                 self._merge_order_bias(mygraph)
7059                 def cmp_circular_bias(n1, n2):
7060                         """
7061                         RDEPEND is stronger than PDEPEND and this function
7062                         measures such a strength bias within a circular
7063                         dependency relationship.
7064                         """
7065                         n1_n2_medium = n2 in mygraph.child_nodes(n1,
7066                                 ignore_priority=priority_range.ignore_medium_soft)
7067                         n2_n1_medium = n1 in mygraph.child_nodes(n2,
7068                                 ignore_priority=priority_range.ignore_medium_soft)
7069                         if n1_n2_medium == n2_n1_medium:
7070                                 return 0
7071                         elif n1_n2_medium:
7072                                 return 1
7073                         return -1
7074                 myblocker_uninstalls = self._blocker_uninstalls.copy()
7075                 retlist=[]
7076                 # Contains uninstall tasks that have been scheduled to
7077                 # occur after overlapping blockers have been installed.
7078                 scheduled_uninstalls = set()
7079                 # Contains any Uninstall tasks that have been ignored
7080                 # in order to avoid the circular deps code path. These
7081                 # correspond to blocker conflicts that could not be
7082                 # resolved.
7083                 ignored_uninstall_tasks = set()
7084                 have_uninstall_task = False
7085                 complete = "complete" in self.myparams
7086                 asap_nodes = []
7087
7088                 def get_nodes(**kwargs):
7089                         """
7090                         Returns leaf nodes excluding Uninstall instances
7091                         since those should be executed as late as possible.
7092                         """
7093                         return [node for node in mygraph.leaf_nodes(**kwargs) \
7094                                 if isinstance(node, Package) and \
7095                                         (node.operation != "uninstall" or \
7096                                         node in scheduled_uninstalls)]
7097
7098                 # sys-apps/portage needs special treatment if ROOT="/"
7099                 running_root = self._running_root.root
7100                 from portage.const import PORTAGE_PACKAGE_ATOM
7101                 runtime_deps = InternalPackageSet(
7102                         initial_atoms=[PORTAGE_PACKAGE_ATOM])
7103                 running_portage = self.trees[running_root]["vartree"].dbapi.match_pkgs(
7104                         PORTAGE_PACKAGE_ATOM)
7105                 replacement_portage = self.mydbapi[running_root].match_pkgs(
7106                         PORTAGE_PACKAGE_ATOM)
7107
7108                 if running_portage:
7109                         running_portage = running_portage[0]
7110                 else:
7111                         running_portage = None
7112
7113                 if replacement_portage:
7114                         replacement_portage = replacement_portage[0]
7115                 else:
7116                         replacement_portage = None
7117
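                     # If the best available portage is the instance that is already
                     # running, then there is no portage update to prioritize.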
7118                 if replacement_portage == running_portage:
7119                         replacement_portage = None
7120
7121                 if replacement_portage is not None:
7122                         # update from running_portage to replacement_portage asap
7123                         asap_nodes.append(replacement_portage)
7124
7125                 if running_portage is not None:
7126                         try:
7127                                 portage_rdepend = self._select_atoms_highest_available(
7128                                         running_root, running_portage.metadata["RDEPEND"],
7129                                         myuse=running_portage.use.enabled,
7130                                         parent=running_portage, strict=False)
7131                         except portage.exception.InvalidDependString, e:
7132                                 portage.writemsg("!!! Invalid RDEPEND in " + \
7133                                         "'%svar/db/pkg/%s/RDEPEND': %s\n" % \
7134                                         (running_root, running_portage.cpv, e), noiselevel=-1)
7135                                 del e
7136                                 portage_rdepend = []
7137                         runtime_deps.update(atom for atom in portage_rdepend \
7138                                 if not atom.startswith("!"))
7139
7140                 def gather_deps(ignore_priority, mergeable_nodes,
7141                         selected_nodes, node):
7142                         """
7143                         Recursively gather a group of nodes that RDEPEND on
7144                         each other. This ensures that they are merged as a group
7145                         and get their RDEPENDs satisfied as soon as possible.
7146                         """
7147                         if node in selected_nodes:
7148                                 return True
7149                         if node not in mergeable_nodes:
7150                                 return False
7151                         if node == replacement_portage and \
7152                                 mygraph.child_nodes(node,
7153                                 ignore_priority=priority_range.ignore_medium_soft):
7154                                 # Make sure that portage always has all of its
7155                                 # RDEPENDs installed first.
7156                                 return False
7157                         selected_nodes.add(node)
7158                         for child in mygraph.child_nodes(node,
7159                                 ignore_priority=ignore_priority):
7160                                 if not gather_deps(ignore_priority,
7161                                         mergeable_nodes, selected_nodes, child):
7162                                         return False
7163                         return True
7164
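                     # In addition to the usual priority ranges, these helpers also
                     # ignore the hard ordering deps created for blocker-driven
                     # uninstall tasks.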
7165                 def ignore_uninst_or_med(priority):
7166                         if priority is BlockerDepPriority.instance:
7167                                 return True
7168                         return priority_range.ignore_medium(priority)
7169
7170                 def ignore_uninst_or_med_soft(priority):
7171                         if priority is BlockerDepPriority.instance:
7172                                 return True
7173                         return priority_range.ignore_medium_soft(priority)
7174
7175                 tree_mode = "--tree" in self.myopts
7176                 # Tracks whether or not the current iteration should prefer asap_nodes
7177                 # if available.  This is set to False when the previous iteration
7178                 # failed to select any nodes.  It is reset whenever nodes are
7179                 # successfully selected.
7180                 prefer_asap = True
7181
7182                 # Controls whether or not the current iteration should drop edges that
7183                 # are "satisfied" by installed packages, in order to solve circular
7184                 # dependencies. The deep runtime dependencies of installed packages are
7185                 # not checked in this case (bug #199856), so it must be avoided
7186                 # whenever possible.
7187                 drop_satisfied = False
7188
7189                 # State of variables for successive iterations that loosen the
7190                 # criteria for node selection.
7191                 #
7192                 # iteration   prefer_asap   drop_satisfied
7193                 # 1           True          False
7194                 # 2           False         False
7195                 # 3           False         True
7196                 #
7197                 # If no nodes are selected on the last iteration, it is due to
7198                 # unresolved blockers or circular dependencies.
7199
7200                 while not mygraph.empty():
7201                         self.spinner.update()
7202                         selected_nodes = None
7203                         ignore_priority = None
7204                         if drop_satisfied or (prefer_asap and asap_nodes):
7205                                 priority_range = DepPrioritySatisfiedRange
7206                         else:
7207                                 priority_range = DepPriorityNormalRange
7208                         if prefer_asap and asap_nodes:
7209                                 # ASAP nodes are merged before their soft deps. Go ahead and
7210                                 # select root nodes here if necessary, since it's typical for
7211                                 # the parent to have been removed from the graph already.
7212                                 asap_nodes = [node for node in asap_nodes \
7213                                         if mygraph.contains(node)]
7214                                 for node in asap_nodes:
7215                                         if not mygraph.child_nodes(node,
7216                                                 ignore_priority=priority_range.ignore_soft):
7217                                                 selected_nodes = [node]
7218                                                 asap_nodes.remove(node)
7219                                                 break
7220                         if not selected_nodes and \
7221                                 not (prefer_asap and asap_nodes):
7222                                 for i in xrange(priority_range.NONE,
7223                                         priority_range.MEDIUM_SOFT + 1):
7224                                         ignore_priority = priority_range.ignore_priority[i]
7225                                         nodes = get_nodes(ignore_priority=ignore_priority)
7226                                         if nodes:
7227                                                 # If there is a mix of uninstall nodes with other
7228                                                 # types, save the uninstall nodes for later since
7229                                                 # sometimes a merge node will render an uninstall
7230                                                 # node unnecessary (due to occupying the same slot),
7231                                                 # and we want to avoid executing a separate uninstall
7232                                                 # task in that case.
7233                                                 if len(nodes) > 1:
7234                                                         good_uninstalls = []
7235                                                         with_some_uninstalls_excluded = []
7236                                                         for node in nodes:
7237                                                                 if node.operation == "uninstall":
7238                                                                         slot_node = self.mydbapi[node.root
7239                                                                                 ].match_pkgs(node.slot_atom)
7240                                                                         if slot_node and \
7241                                                                                 slot_node[0].operation == "merge":
7242                                                                                 continue
7243                                                                         good_uninstalls.append(node)
7244                                                                 with_some_uninstalls_excluded.append(node)
7245                                                         if good_uninstalls:
7246                                                                 nodes = good_uninstalls
7247                                                         elif with_some_uninstalls_excluded:
7248                                                                 nodes = with_some_uninstalls_excluded
7249                                                         else:
7250                                                                 nodes = nodes
7251
7252                                                 if ignore_priority is None and not tree_mode:
7253                                                         # Greedily pop all of these nodes since no
7254                                                         # relationship has been ignored. This optimization
7255                                                         # destroys --tree output, so it's disabled in tree
7256                                                         # mode.
7257                                                         selected_nodes = nodes
7258                                                 else:
7259                                                         # For optimal merge order:
7260                                                         #  * Only pop one node.
7261                                                         #  * Removing a root node (node without a parent)
7262                                                         #    will not produce a leaf node, so avoid it.
7263                                                         #  * It's normal for a selected uninstall to be a
7264                                                         #    root node, so don't check them for parents.
7265                                                         for node in nodes:
7266                                                                 if node.operation == "uninstall" or \
7267                                                                         mygraph.parent_nodes(node):
7268                                                                         selected_nodes = [node]
7269                                                                         break
7270
7271                                                 if selected_nodes:
7272                                                         break
7273
7274                         if not selected_nodes:
7275                                 nodes = get_nodes(ignore_priority=priority_range.ignore_medium)
7276                                 if nodes:
7277                                         mergeable_nodes = set(nodes)
7278                                         if prefer_asap and asap_nodes:
7279                                                 nodes = asap_nodes
7280                                         for i in xrange(priority_range.SOFT,
7281                                                 priority_range.MEDIUM_SOFT + 1):
7282                                                 ignore_priority = priority_range.ignore_priority[i]
7283                                                 for node in nodes:
7284                                                         if not mygraph.parent_nodes(node):
7285                                                                 continue
7286                                                         selected_nodes = set()
7287                                                         if gather_deps(ignore_priority,
7288                                                                 mergeable_nodes, selected_nodes, node):
7289                                                                 break
7290                                                         else:
7291                                                                 selected_nodes = None
7292                                                 if selected_nodes:
7293                                                         break
7294
7295                                         if prefer_asap and asap_nodes and not selected_nodes:
7296                                                 # We failed to find any asap nodes to merge, so ignore
7297                                                 # them for the next iteration.
7298                                                 prefer_asap = False
7299                                                 continue
7300
7301                         if selected_nodes and ignore_priority is not None:
7302                                 # Try to merge ignored medium_soft deps as soon as possible
7303                                 # if they're not satisfied by installed packages.
7304                                 for node in selected_nodes:
7305                                         children = set(mygraph.child_nodes(node))
7306                                         soft = children.difference(
7307                                                 mygraph.child_nodes(node,
7308                                                 ignore_priority=DepPrioritySatisfiedRange.ignore_soft))
7309                                         medium_soft = children.difference(
7310                                                 mygraph.child_nodes(node,
7311                                                         ignore_priority = \
7312                                                         DepPrioritySatisfiedRange.ignore_medium_soft))
7313                                         medium_soft.difference_update(soft)
7314                                         for child in medium_soft:
7315                                                 if child in selected_nodes:
7316                                                         continue
7317                                                 if child in asap_nodes:
7318                                                         continue
7319                                                 asap_nodes.append(child)
7320
7321                         if selected_nodes and len(selected_nodes) > 1:
7322                                 if not isinstance(selected_nodes, list):
7323                                         selected_nodes = list(selected_nodes)
7324                                 selected_nodes.sort(key=cmp_sort_key(cmp_circular_bias))
7325
7326                         if not selected_nodes and not myblocker_uninstalls.is_empty():
7327                                 # An Uninstall task needs to be executed in order to
7328                                 # avoid a conflict, if possible.

7329
7330                                 if drop_satisfied:
7331                                         priority_range = DepPrioritySatisfiedRange
7332                                 else:
7333                                         priority_range = DepPriorityNormalRange
7334
7335                                 mergeable_nodes = get_nodes(
7336                                         ignore_priority=ignore_uninst_or_med)
7337
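                                     # Scan the candidate uninstall tasks, skipping any that
                                     # the safety checks below rule out, and prefer the task
                                     # whose parents have the fewest remaining dependencies,
                                     # since it is the most likely to produce a new leaf node.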
7338                                 min_parent_deps = None
7339                                 uninst_task = None
7340                                 for task in myblocker_uninstalls.leaf_nodes():
7341                                         # Do some sanity checks so that system or world packages
7342                                         # don't get uninstalled inappropriately here (only really
7343                                         # necessary when --complete-graph has not been enabled).
7344
7345                                         if task in ignored_uninstall_tasks:
7346                                                 continue
7347
7348                                         if task in scheduled_uninstalls:
7349                                                 # It's been scheduled but it hasn't
7350                                                 # been executed yet due to dependence
7351                                                 # on installation of blocking packages.
7352                                                 continue
7353
7354                                         root_config = self.roots[task.root]
7355                                         inst_pkg = self._pkg_cache[
7356                                                 ("installed", task.root, task.cpv, "nomerge")]
7357
7358                                         if self.digraph.contains(inst_pkg):
7359                                                 continue
7360
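                                             # Classify the blockers that this uninstall would
                                             # resolve: "!!" atoms explicitly forbid temporary
                                             # overlap, while EAPI 0 and 1 blockers carry no
                                             # overlap information and are handled with the
                                             # heuristic checks below.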
7361                                         forbid_overlap = False
7362                                         heuristic_overlap = False
7363                                         for blocker in myblocker_uninstalls.parent_nodes(task):
7364                                                 if blocker.eapi in ("0", "1"):
7365                                                         heuristic_overlap = True
7366                                                 elif blocker.atom.blocker.overlap.forbid:
7367                                                         forbid_overlap = True
7368                                                         break
7369                                         if forbid_overlap and running_root == task.root:
7370                                                 continue
7371
7372                                         if heuristic_overlap and running_root == task.root:
7373                                                 # Never uninstall sys-apps/portage or its essential
7374                                                 # dependencies, except through replacement.
7375                                                 try:
7376                                                         runtime_dep_atoms = \
7377                                                                 list(runtime_deps.iterAtomsForPackage(task))
7378                                                 except portage.exception.InvalidDependString, e:
7379                                                         portage.writemsg("!!! Invalid PROVIDE in " + \
7380                                                                 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
7381                                                                 (task.root, task.cpv, e), noiselevel=-1)
7382                                                         del e
7383                                                         continue
7384
7385                                                 # Don't uninstall a runtime dep if it appears
7386                                                 # to be the only suitable one installed.
7387                                                 skip = False
7388                                                 vardb = root_config.trees["vartree"].dbapi
7389                                                 for atom in runtime_dep_atoms:
7390                                                         other_version = None
7391                                                         for pkg in vardb.match_pkgs(atom):
7392                                                                 if pkg.cpv == task.cpv and \
7393                                                                         pkg.metadata["COUNTER"] == \
7394                                                                         task.metadata["COUNTER"]:
7395                                                                         continue
7396                                                                 other_version = pkg
7397                                                                 break
7398                                                         if other_version is None:
7399                                                                 skip = True
7400                                                                 break
7401                                                 if skip:
7402                                                         continue
7403
7404                                                 # For packages in the system set, don't take
7405                                                 # any chances. If the conflict can't be resolved
7406                                                 # by a normal replacement operation then abort.
7407                                                 skip = False
7408                                                 try:
7409                                                         for atom in root_config.sets[
7410                                                                 "system"].iterAtomsForPackage(task):
7411                                                                 skip = True
7412                                                                 break
7413                                                 except portage.exception.InvalidDependString, e:
7414                                                         portage.writemsg("!!! Invalid PROVIDE in " + \
7415                                                                 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
7416                                                                 (task.root, task.cpv, e), noiselevel=-1)
7417                                                         del e
7418                                                         skip = True
7419                                                 if skip:
7420                                                         continue
7421
7422                                         # Note that the world check isn't always
7423                                         # necessary since self._complete_graph() will
7424                                         # add all packages from the system and world sets to the
7425                                         # graph. This just allows unresolved conflicts to be
7426                                         # detected as early as possible, which makes it possible
7427                                         # to avoid calling self._complete_graph() when it is
7428                                         # unnecessary due to blockers triggering an abort.
7429                                         if not complete:
7430                                                # For packages in the world set, go ahead and uninstall
7431                                                 # when necessary, as long as the atom will be satisfied
7432                                                 # in the final state.
7433                                                 graph_db = self.mydbapi[task.root]
7434                                                 skip = False
7435                                                 try:
7436                                                         for atom in root_config.sets[
7437                                                                 "world"].iterAtomsForPackage(task):
7438                                                                 satisfied = False
7439                                                                 for pkg in graph_db.match_pkgs(atom):
7440                                                                         if pkg == inst_pkg:
7441                                                                                 continue
7442                                                                         satisfied = True
7443                                                                         break
7444                                                                 if not satisfied:
7445                                                                         skip = True
7446                                                                         self._blocked_world_pkgs[inst_pkg] = atom
7447                                                                         break
7448                                                 except portage.exception.InvalidDependString, e:
7449                                                         portage.writemsg("!!! Invalid PROVIDE in " + \
7450                                                                 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
7451                                                                 (task.root, task.cpv, e), noiselevel=-1)
7452                                                         del e
7453                                                         skip = True
7454                                                 if skip:
7455                                                         continue
7456
7457                                         # Check the deps of parent nodes to ensure that
7458                                         # the chosen task produces a leaf node. Maybe
7459                                         # this can be optimized some more to make the
7460                                         # best possible choice, but the current algorithm
7461                                         # is simple and should be near optimal for most
7462                                         # common cases.
7463                                         mergeable_parent = False
7464                                         parent_deps = set()
7465                                         for parent in mygraph.parent_nodes(task):
7466                                                 parent_deps.update(mygraph.child_nodes(parent,
7467                                                         ignore_priority=priority_range.ignore_medium_soft))
7468                                                 if parent in mergeable_nodes and \
7469                                                         gather_deps(ignore_uninst_or_med_soft,
7470                                                         mergeable_nodes, set(), parent):
7471                                                         mergeable_parent = True
7472
7473                                         if not mergeable_parent:
7474                                                 continue
7475
7476                                         parent_deps.remove(task)
7477                                         if min_parent_deps is None or \
7478                                                 len(parent_deps) < min_parent_deps:
7479                                                 min_parent_deps = len(parent_deps)
7480                                                 uninst_task = task
7481
7482                                 if uninst_task is not None:
7483                                         # The uninstall is performed only after blocking
7484                                         # packages have been merged on top of it. Files that
7485                                         # collide with blocking packages are detected and
7486                                         # removed from the list of files to be uninstalled.
7487                                         scheduled_uninstalls.add(uninst_task)
7488                                         parent_nodes = mygraph.parent_nodes(uninst_task)
7489
7490                                         # Reverse the parent -> uninstall edges since we want
7491                                         # to do the uninstall after blocking packages have
7492                                         # been merged on top of it.
7493                                         mygraph.remove(uninst_task)
7494                                         for blocked_pkg in parent_nodes:
7495                                                 mygraph.add(blocked_pkg, uninst_task,
7496                                                         priority=BlockerDepPriority.instance)
7497                                                 scheduler_graph.remove_edge(uninst_task, blocked_pkg)
7498                                                 scheduler_graph.add(blocked_pkg, uninst_task,
7499                                                         priority=BlockerDepPriority.instance)
7500
7501                                         # Reset the state variables for leaf node selection and
7502                                         # continue trying to select leaf nodes.
7503                                         prefer_asap = True
7504                                         drop_satisfied = False
7505                                         continue
7506
7507                         if not selected_nodes:
7508                                 # Only select root nodes as a last resort. This case should
7509                                 # only trigger when the graph is nearly empty and the only
7510                                 # remaining nodes are isolated (no parents or children). Since
7511                                 # the nodes must be isolated, ignore_priority is not needed.
7512                                 selected_nodes = get_nodes()
7513
7514                         if not selected_nodes and not drop_satisfied:
7515                                 drop_satisfied = True
7516                                 continue
7517
7518                         if not selected_nodes and not myblocker_uninstalls.is_empty():
7519                                 # If possible, drop an uninstall task here in order to avoid
7520                                 # the circular deps code path. The corresponding blocker will
7521                                 # still be counted as an unresolved conflict.
7522                                 uninst_task = None
7523                                 for node in myblocker_uninstalls.leaf_nodes():
7524                                         try:
7525                                                 mygraph.remove(node)
7526                                         except KeyError:
7527                                                 pass
7528                                         else:
7529                                                 uninst_task = node
7530                                                 ignored_uninstall_tasks.add(node)
7531                                                 break
7532
7533                                 if uninst_task is not None:
7534                                         # Reset the state variables for leaf node selection and
7535                                         # continue trying to select leaf nodes.
7536                                         prefer_asap = True
7537                                         drop_satisfied = False
7538                                         continue
7539
7540                         if not selected_nodes:
7541                                 self._circular_deps_for_display = mygraph
7542                                 raise self._unknown_internal_error()
7543
7544                         # At this point, we've succeeded in selecting one or more nodes, so
7545                         # reset state variables for leaf node selection.
7546                         prefer_asap = True
7547                         drop_satisfied = False
7548
7549                         mygraph.difference_update(selected_nodes)
7550
7551                         for node in selected_nodes:
7552                                 if isinstance(node, Package) and \
7553                                         node.operation == "nomerge":
7554                                         continue
7555
7556                                 # Handle interactions between blockers
7557                                 # and uninstallation tasks.
7558                                 solved_blockers = set()
7559                                 uninst_task = None
7560                                 if isinstance(node, Package) and \
7561                                         "uninstall" == node.operation:
7562                                         have_uninstall_task = True
7563                                         uninst_task = node
7564                                 else:
7565                                         vardb = self.trees[node.root]["vartree"].dbapi
7566                                         previous_cpv = vardb.match(node.slot_atom)
7567                                         if previous_cpv:
7568                                                 # The package will be replaced by this one, so remove
7569                                                 # the corresponding Uninstall task if necessary.
7570                                                 previous_cpv = previous_cpv[0]
7571                                                 uninst_task = \
7572                                                         ("installed", node.root, previous_cpv, "uninstall")
7573                                                 try:
7574                                                         mygraph.remove(uninst_task)
7575                                                 except KeyError:
7576                                                         pass
7577
7578                                 if uninst_task is not None and \
7579                                         uninst_task not in ignored_uninstall_tasks and \
7580                                         myblocker_uninstalls.contains(uninst_task):
7581                                         blocker_nodes = myblocker_uninstalls.parent_nodes(uninst_task)
7582                                         myblocker_uninstalls.remove(uninst_task)
7583                                         # Discard any blockers that this Uninstall solves.
7584                                         for blocker in blocker_nodes:
7585                                                 if not myblocker_uninstalls.child_nodes(blocker):
7586                                                         myblocker_uninstalls.remove(blocker)
7587                                                         solved_blockers.add(blocker)
7588
7589                                 retlist.append(node)
7590
7591                                 if (isinstance(node, Package) and \
7592                                         "uninstall" == node.operation) or \
7593                                         (uninst_task is not None and \
7594                                         uninst_task in scheduled_uninstalls):
7595                                         # Include satisfied blockers in the merge list,
7596                                         # since the user might be interested and it also
7597                                         # serves as an indicator that blocking packages
7598                                         # will be temporarily installed simultaneously.
7599                                         for blocker in solved_blockers:
7600                                                 retlist.append(Blocker(atom=blocker.atom,
7601                                                         root=blocker.root, eapi=blocker.eapi,
7602                                                         satisfied=True))
7603
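                     # Collect any blockers that remain unresolved so that they can
                     # be shown in the merge list and, if necessary, abort it below.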
7604                 unsolvable_blockers = set(self._unsolvable_blockers.leaf_nodes())
7605                 for node in myblocker_uninstalls.root_nodes():
7606                         unsolvable_blockers.add(node)
7607
7608                 for blocker in unsolvable_blockers:
7609                         retlist.append(blocker)
7610
7611                 # If any Uninstall tasks need to be executed in order
7612                 # to avoid a conflict, complete the graph with any
7613                 # dependencies that may have been initially
7614                 # neglected (to ensure that unsafe Uninstall tasks
7615                 # are properly identified and blocked from execution).
7616                 if have_uninstall_task and \
7617                         not complete and \
7618                         not unsolvable_blockers:
7619                         self.myparams.add("complete")
7620                         raise self._serialize_tasks_retry("")
7621
7622                 if unsolvable_blockers and \
7623                         not self._accept_blocker_conflicts():
7624                         self._unsatisfied_blockers_for_display = unsolvable_blockers
7625                         self._serialized_tasks_cache = retlist[:]
7626                         self._scheduler_graph = scheduler_graph
7627                         raise self._unknown_internal_error()
7628
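                     # Unresolved slot collisions are treated like blocker conflicts:
                     # abort unless the user has chosen to accept such conflicts.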
7629                 if self._slot_collision_info and \
7630                         not self._accept_blocker_conflicts():
7631                         self._serialized_tasks_cache = retlist[:]
7632                         self._scheduler_graph = scheduler_graph
7633                         raise self._unknown_internal_error()
7634
7635                 return retlist, scheduler_graph
7636
7637         def _show_circular_deps(self, mygraph):
7638                 # No leaf nodes are available, so we have a circular
7639                 # dependency panic situation.  Reduce the noise level to a
7640                 # minimum via repeated elimination of root nodes since they
7641                 # have no parents and thus cannot be part of a cycle.
7642                 while True:
7643                         root_nodes = mygraph.root_nodes(
7644                                 ignore_priority=DepPrioritySatisfiedRange.ignore_medium_soft)
7645                         if not root_nodes:
7646                                 break
7647                         mygraph.difference_update(root_nodes)
7648                 # Display the USE flags that are enabled on nodes that are part
7649                 # of dependency cycles in case that helps the user decide to
7650                 # disable some of them.
7651                 display_order = []
7652                 tempgraph = mygraph.copy()
7653                 while not tempgraph.empty():
7654                         nodes = tempgraph.leaf_nodes()
7655                         if not nodes:
7656                                 node = tempgraph.order[0]
7657                         else:
7658                                 node = nodes[0]
7659                         display_order.append(node)
7660                         tempgraph.remove(node)
7661                 display_order.reverse()
7662                 self.myopts.pop("--quiet", None)
7663                 self.myopts.pop("--verbose", None)
7664                 self.myopts["--tree"] = True
7665                 portage.writemsg("\n\n", noiselevel=-1)
7666                 self.display(display_order)
7667                 prefix = colorize("BAD", " * ")
7668                 portage.writemsg("\n", noiselevel=-1)
7669                 portage.writemsg(prefix + "Error: circular dependencies:\n",
7670                         noiselevel=-1)
7671                 portage.writemsg("\n", noiselevel=-1)
7672                 mygraph.debug_print()
7673                 portage.writemsg("\n", noiselevel=-1)
7674                 portage.writemsg(prefix + "Note that circular dependencies " + \
7675                         "can often be avoided by temporarily\n", noiselevel=-1)
7676                 portage.writemsg(prefix + "disabling USE flags that trigger " + \
7677                         "optional dependencies.\n", noiselevel=-1)
7678
7679         def _show_merge_list(self):
7680                 if self._serialized_tasks_cache is not None and \
7681                         not (self._displayed_list and \
7682                         (self._displayed_list == self._serialized_tasks_cache or \
7683                         self._displayed_list == \
7684                                 list(reversed(self._serialized_tasks_cache)))):
7685                         display_list = self._serialized_tasks_cache[:]
7686                         if "--tree" in self.myopts:
7687                                 display_list.reverse()
7688                         self.display(display_list)
7689
7690         def _show_unsatisfied_blockers(self, blockers):
7691                 self._show_merge_list()
7692                 msg = "Error: The above package list contains " + \
7693                         "packages which cannot be installed " + \
7694                         "at the same time on the same system."
7695                 prefix = colorize("BAD", " * ")
7696                 from textwrap import wrap
7697                 portage.writemsg("\n", noiselevel=-1)
7698                 for line in wrap(msg, 70):
7699                         portage.writemsg(prefix + line + "\n", noiselevel=-1)
7700
7701                 # Display the conflicting packages along with the packages
7702                 # that pulled them in. This is helpful for troubleshooting
7703                 # cases in which blockers aren't resolved automatically and
7704                 # the reasons are not apparent from the normal merge list
7705                 # display.
7706
7707                 conflict_pkgs = {}
7708                 for blocker in blockers:
7709                         for pkg in chain(self._blocked_pkgs.child_nodes(blocker), \
7710                                 self._blocker_parents.parent_nodes(blocker)):
7711                                 parent_atoms = self._parent_atoms.get(pkg)
7712                                 if not parent_atoms:
7713                                         atom = self._blocked_world_pkgs.get(pkg)
7714                                         if atom is not None:
7715                                                 parent_atoms = set([("@world", atom)])
7716                                 if parent_atoms:
7717                                         conflict_pkgs[pkg] = parent_atoms
7718
7719                 if conflict_pkgs:
7720                         # Reduce noise by pruning packages that are only
7721                         # pulled in by other conflict packages.
7722                         pruned_pkgs = set()
7723                         for pkg, parent_atoms in conflict_pkgs.iteritems():
7724                                 relevant_parent = False
7725                                 for parent, atom in parent_atoms:
7726                                         if parent not in conflict_pkgs:
7727                                                 relevant_parent = True
7728                                                 break
7729                                 if not relevant_parent:
7730                                         pruned_pkgs.add(pkg)
7731                         for pkg in pruned_pkgs:
7732                                 del conflict_pkgs[pkg]
7733
7734                 if conflict_pkgs:
7735                         msg = []
7736                         msg.append("\n")
7737                         indent = "  "
7738                         # Max number of parents shown, to avoid flooding the display.
7739                         max_parents = 3
7740                         for pkg, parent_atoms in conflict_pkgs.iteritems():
7741
7742                                 pruned_list = set()
7743
7744                                 # Prefer packages that are not directly involved in a conflict.
7745                                 for parent_atom in parent_atoms:
7746                                         if len(pruned_list) >= max_parents:
7747                                                 break
7748                                         parent, atom = parent_atom
7749                                         if parent not in conflict_pkgs:
7750                                                 pruned_list.add(parent_atom)
7751
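                                     # Fill any remaining slots with the other parents,
                                     # up to the max_parents limit.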
7752                                 for parent_atom in parent_atoms:
7753                                         if len(pruned_list) >= max_parents:
7754                                                 break
7755                                         pruned_list.add(parent_atom)
7756
7757                                 omitted_parents = len(parent_atoms) - len(pruned_list)
7758                                 msg.append(indent + "%s pulled in by\n" % pkg)
7759
7760                                 for parent_atom in pruned_list:
7761                                         parent, atom = parent_atom
7762                                         msg.append(2*indent)
7763                                         if isinstance(parent,
7764                                                 (PackageArg, AtomArg)):
7765                                                 # For PackageArg and AtomArg types, it's
7766                                                 # redundant to display the atom attribute.
7767                                                 msg.append(str(parent))
7768                                         else:
7769                                                 # Display the specific atom from SetArg or
7770                                                 # Package types.
7771                                                 msg.append("%s required by %s" % (atom, parent))
7772                                         msg.append("\n")
7773
7774                                 if omitted_parents:
7775                                         msg.append(2*indent)
7776                                         msg.append("(and %d more)\n" % omitted_parents)
7777
7778                                 msg.append("\n")
7779
7780                         sys.stderr.write("".join(msg))
7781                         sys.stderr.flush()
7782
7783                 if "--quiet" not in self.myopts:
7784                         show_blocker_docs_link()
7785
7786         def display(self, mylist, favorites=[], verbosity=None):
7787
7788                 # This is used to prevent display_problems() from
7789                 # redundantly displaying this exact same merge list
7790                 # again via _show_merge_list().
7791                 self._displayed_list = mylist
7792
7793                 if verbosity is None:
7794                         verbosity = ("--quiet" in self.myopts and 1 or \
7795                                 "--verbose" in self.myopts and 3 or 2)
7796                 favorites_set = InternalPackageSet(favorites)
7797                 oneshot = "--oneshot" in self.myopts or \
7798                         "--onlydeps" in self.myopts
7799                 columns = "--columns" in self.myopts
7800                 changelogs=[]
7801                 p=[]
7802                 blockers = []
7803
7804                 counters = PackageCounters()
7805
7806                 if verbosity == 1 and "--verbose" not in self.myopts:
7807                         def create_use_string(*args):
7808                                 return ""
7809                 else:
7810                         def create_use_string(name, cur_iuse, iuse_forced, cur_use,
7811                                 old_iuse, old_use,
7812                                 is_new, reinst_flags,
7813                                 all_flags=(verbosity == 3 or "--quiet" in self.myopts),
7814                                 alphabetical=("--alphabetical" in self.myopts)):
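                                     # Build the USE="..." string for one package. The
                                     # decorations follow emerge's usual conventions:
                                     # "*" marks a flag whose state changed, "%" marks a
                                     # flag added to or removed from IUSE, and parentheses
                                     # mark forced or removed flags.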
7815                                 enabled = []
7816                                 if alphabetical:
7817                                         disabled = enabled
7818                                         removed = enabled
7819                                 else:
7820                                         disabled = []
7821                                         removed = []
7822                                 cur_iuse = set(cur_iuse)
7823                                 enabled_flags = cur_iuse.intersection(cur_use)
7824                                 removed_iuse = set(old_iuse).difference(cur_iuse)
7825                                 any_iuse = cur_iuse.union(old_iuse)
7826                                 any_iuse = list(any_iuse)
7827                                 any_iuse.sort()
7828                                 for flag in any_iuse:
7829                                         flag_str = None
7830                                         isEnabled = False
7831                                         reinst_flag = reinst_flags and flag in reinst_flags
7832                                         if flag in enabled_flags:
7833                                                 isEnabled = True
7834                                                 if is_new or flag in old_use and \
7835                                                         (all_flags or reinst_flag):
7836                                                         flag_str = red(flag)
7837                                                 elif flag not in old_iuse:
7838                                                         flag_str = yellow(flag) + "%*"
7839                                                 elif flag not in old_use:
7840                                                         flag_str = green(flag) + "*"
7841                                         elif flag in removed_iuse:
7842                                                 if all_flags or reinst_flag:
7843                                                         flag_str = yellow("-" + flag) + "%"
7844                                                         if flag in old_use:
7845                                                                 flag_str += "*"
7846                                                         flag_str = "(" + flag_str + ")"
7847                                                         removed.append(flag_str)
7848                                                 continue
7849                                         else:
7850                                                 if is_new or flag in old_iuse and \
7851                                                         flag not in old_use and \
7852                                                         (all_flags or reinst_flag):
7853                                                         flag_str = blue("-" + flag)
7854                                                 elif flag not in old_iuse:
7855                                                         flag_str = yellow("-" + flag)
7856                                                         if flag not in iuse_forced:
7857                                                                 flag_str += "%"
7858                                                 elif flag in old_use:
7859                                                         flag_str = green("-" + flag) + "*"
7860                                         if flag_str:
7861                                                 if flag in iuse_forced:
7862                                                         flag_str = "(" + flag_str + ")"
7863                                                 if isEnabled:
7864                                                         enabled.append(flag_str)
7865                                                 else:
7866                                                         disabled.append(flag_str)
7867
7868                                 if alphabetical:
7869                                         ret = " ".join(enabled)
7870                                 else:
7871                                         ret = " ".join(enabled + disabled + removed)
7872                                 if ret:
7873                                         ret = '%s="%s" ' % (name, ret)
7874                                 return ret
7875
7876                 repo_display = RepoDisplay(self.roots)
7877
7878                 tree_nodes = []
7879                 display_list = []
7880                 mygraph = self.digraph.copy()
7881
7882                 # If there are any Uninstall instances, add the corresponding
7883                 # blockers to the digraph (useful for --tree display).
7884
7885                 executed_uninstalls = set(node for node in mylist \
7886                         if isinstance(node, Package) and node.operation == "unmerge")
7887
7888                 for uninstall in self._blocker_uninstalls.leaf_nodes():
7889                         uninstall_parents = \
7890                                 self._blocker_uninstalls.parent_nodes(uninstall)
7891                         if not uninstall_parents:
7892                                 continue
7893
7894                         # Remove the corresponding "nomerge" node and substitute
7895                         # the Uninstall node.
7896                         inst_pkg = self._pkg_cache[
7897                                 ("installed", uninstall.root, uninstall.cpv, "nomerge")]
7898                         try:
7899                                 mygraph.remove(inst_pkg)
7900                         except KeyError:
7901                                 pass
7902
7903                         try:
7904                                 inst_pkg_blockers = self._blocker_parents.child_nodes(inst_pkg)
7905                         except KeyError:
7906                                 inst_pkg_blockers = []
7907
7908                         # Break the Package -> Uninstall edges.
7909                         mygraph.remove(uninstall)
7910
7911                         # Resolution of a package's blockers
7912                         # depends on its own uninstallation.
7913                         for blocker in inst_pkg_blockers:
7914                                 mygraph.add(uninstall, blocker)
7915
7916                         # Expand Package -> Uninstall edges into
7917                         # Package -> Blocker -> Uninstall edges.
7918                         for blocker in uninstall_parents:
7919                                 mygraph.add(uninstall, blocker)
7920                                 for parent in self._blocker_parents.parent_nodes(blocker):
7921                                         if parent != inst_pkg:
7922                                                 mygraph.add(blocker, parent)
7923
7924                         # If the uninstall task did not need to be executed because
7925                         # of an upgrade, display Blocker -> Upgrade edges since the
7926                         # corresponding Blocker -> Uninstall edges will not be shown.
7927                         upgrade_node = \
7928                                 self._slot_pkg_map[uninstall.root].get(uninstall.slot_atom)
7929                         if upgrade_node is not None and \
7930                                 uninstall not in executed_uninstalls:
7931                                 for blocker in uninstall_parents:
7932                                         mygraph.add(upgrade_node, blocker)
7933
7934                 unsatisfied_blockers = []
7935                 i = 0
7936                 depth = 0
7937                 shown_edges = set()
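                     # Convert mylist into (node, depth, ordered) display entries.
                     # In --tree mode, depth gives the node's indentation level in
                     # the dependency tree; otherwise it stays at 0.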
7938                 for x in mylist:
7939                         if isinstance(x, Blocker) and not x.satisfied:
7940                                 unsatisfied_blockers.append(x)
7941                                 continue
7942                         graph_key = x
7943                         if "--tree" in self.myopts:
7944                                 depth = len(tree_nodes)
7945                                 while depth and graph_key not in \
7946                                         mygraph.child_nodes(tree_nodes[depth-1]):
7947                                                 depth -= 1
7948                                 if depth:
7949                                         tree_nodes = tree_nodes[:depth]
7950                                         tree_nodes.append(graph_key)
7951                                         display_list.append((x, depth, True))
7952                                         shown_edges.add((graph_key, tree_nodes[depth-1]))
7953                                 else:
7954                                         traversed_nodes = set() # prevent endless cycles
7955                                         traversed_nodes.add(graph_key)
7956                                         def add_parents(current_node, ordered):
7957                                                 parent_nodes = None
7958                                                 # Do not traverse to parents if this node is
7959                                                 # an argument or a direct member of a set that has
7960                                                 # been specified as an argument (system or world).
7961                                                 if current_node not in self._set_nodes:
7962                                                         parent_nodes = mygraph.parent_nodes(current_node)
7963                                                 if parent_nodes:
7964                                                         child_nodes = set(mygraph.child_nodes(current_node))
7965                                                         selected_parent = None
7966                                                         # First, try to avoid a direct cycle.
7967                                                         for node in parent_nodes:
7968                                                                 if not isinstance(node, (Blocker, Package)):
7969                                                                         continue
7970                                                                 if node not in traversed_nodes and \
7971                                                                         node not in child_nodes:
7972                                                                         edge = (current_node, node)
7973                                                                         if edge in shown_edges:
7974                                                                                 continue
7975                                                                         selected_parent = node
7976                                                                         break
7977                                                         if not selected_parent:
7978                                                                 # A direct cycle is unavoidable.
7979                                                                 for node in parent_nodes:
7980                                                                         if not isinstance(node, (Blocker, Package)):
7981                                                                                 continue
7982                                                                         if node not in traversed_nodes:
7983                                                                                 edge = (current_node, node)
7984                                                                                 if edge in shown_edges:
7985                                                                                         continue
7986                                                                                 selected_parent = node
7987                                                                                 break
7988                                                         if selected_parent:
7989                                                                 shown_edges.add((current_node, selected_parent))
7990                                                                 traversed_nodes.add(selected_parent)
7991                                                                 add_parents(selected_parent, False)
7992                                                 display_list.append((current_node,
7993                                                         len(tree_nodes), ordered))
7994                                                 tree_nodes.append(current_node)
7995                                         tree_nodes = []
7996                                         add_parents(graph_key, True)
7997                         else:
7998                                 display_list.append((x, depth, True))
7999                 mylist = display_list
8000                 for x in unsatisfied_blockers:
8001                         mylist.append((x, 0, True))
8002
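                     # Walk the list backwards and prune entries that were only
                     # introduced while filling in the tree: consecutive duplicates,
                     # and "nomerge" nodes that don't lead to anything that will
                     # actually be merged.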
8003                 last_merge_depth = 0
8004                 for i in xrange(len(mylist)-1,-1,-1):
8005                         graph_key, depth, ordered = mylist[i]
8006                         if not ordered and depth == 0 and i > 0 \
8007                                 and graph_key == mylist[i-1][0] and \
8008                                 mylist[i-1][1] == 0:
8009                                 # An ordered node got a consecutive duplicate when the tree was
8010                                 # being filled in.
8011                                 del mylist[i]
8012                                 continue
8013                         if ordered and graph_key[-1] != "nomerge":
8014                                 last_merge_depth = depth
8015                                 continue
8016                         if depth >= last_merge_depth or \
8017                                 i < len(mylist) - 1 and \
8018                                 depth >= mylist[i+1][1]:
8019                                         del mylist[i]
8020
8021                 from portage import flatten
8022                 from portage.dep import use_reduce, paren_reduce
8023                 # List of files to fetch, used to avoid counting the same file
8024                 # twice in the size display (verbose mode).
8025                 myfetchlist=[]
8026
8027                 # Use this set to detect when all the "repoadd" strings are "[0]"
8028                 # and disable the entire repo display in this case.
8029                 repoadd_set = set()
8030
8031                 for mylist_index in xrange(len(mylist)):
8032                         x, depth, ordered = mylist[mylist_index]
8033                         pkg_type = x[0]
8034                         myroot = x[1]
8035                         pkg_key = x[2]
8036                         portdb = self.trees[myroot]["porttree"].dbapi
8037                         bindb  = self.trees[myroot]["bintree"].dbapi
8038                         vardb = self.trees[myroot]["vartree"].dbapi
8039                         vartree = self.trees[myroot]["vartree"]
8040                         pkgsettings = self.pkgsettings[myroot]
8041
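                             # "fetch" is a one-character fetch-restriction indicator:
                             # "F" = fetch restricted, "f" = fetch restricted but the
                             # distfiles are already available, " " = no restriction.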
8042                         fetch=" "
8043                         indent = " " * depth
8044
8045                         if isinstance(x, Blocker):
8046                                 if x.satisfied:
8047                                         blocker_style = "PKG_BLOCKER_SATISFIED"
8048                                         addl = "%s  %s  " % (colorize(blocker_style, "b"), fetch)
8049                                 else:
8050                                         blocker_style = "PKG_BLOCKER"
8051                                         addl = "%s  %s  " % (colorize(blocker_style, "B"), fetch)
8052                                 if ordered:
8053                                         counters.blocks += 1
8054                                         if x.satisfied:
8055                                                 counters.blocks_satisfied += 1
8056                                 resolved = portage.key_expand(
8057                                         str(x.atom).lstrip("!"), mydb=vardb, settings=pkgsettings)
8058                                 if "--columns" in self.myopts and "--quiet" in self.myopts:
8059                                         addl += " " + colorize(blocker_style, resolved)
8060                                 else:
8061                                         addl = "[%s %s] %s%s" % \
8062                                                 (colorize(blocker_style, "blocks"),
8063                                                 addl, indent, colorize(blocker_style, resolved))
8064                                 block_parents = self._blocker_parents.parent_nodes(x)
8065                                 block_parents = set([pnode[2] for pnode in block_parents])
8066                                 block_parents = ", ".join(block_parents)
8067                                 if resolved!=x[2]:
8068                                         addl += colorize(blocker_style,
8069                                                 " (\"%s\" is blocking %s)") % \
8070                                                 (str(x.atom).lstrip("!"), block_parents)
8071                                 else:
8072                                         addl += colorize(blocker_style,
8073                                                 " (is blocking %s)") % block_parents
8074                                 if isinstance(x, Blocker) and x.satisfied:
8075                                         if columns:
8076                                                 continue
8077                                         p.append(addl)
8078                                 else:
8079                                         blockers.append(addl)
8080                         else:
8081                                 pkg_status = x[3]
8082                                 pkg_merge = ordered and pkg_status == "merge"
8083                                 if not pkg_merge and pkg_status == "merge":
8084                                         pkg_status = "nomerge"
8085                                 built = pkg_type != "ebuild"
8086                                 installed = pkg_type == "installed"
8087                                 pkg = x
8088                                 metadata = pkg.metadata
8089                                 ebuild_path = None
8090                                 repo_name = metadata["repository"]
8091                                 if pkg_type == "ebuild":
8092                                         ebuild_path = portdb.findname(pkg_key)
8093                                         if not ebuild_path: # shouldn't happen
8094                                                 raise portage.exception.PackageNotFound(pkg_key)
8095                                         repo_path_real = os.path.dirname(os.path.dirname(
8096                                                 os.path.dirname(ebuild_path)))
8097                                 else:
8098                                         repo_path_real = portdb.getRepositoryPath(repo_name)
8099                                 pkg_use = list(pkg.use.enabled)
8100                                 try:
8101                                         restrict = flatten(use_reduce(paren_reduce(
8102                                                 pkg.metadata["RESTRICT"]), uselist=pkg_use))
8103                                 except portage.exception.InvalidDependString, e:
8104                                         if not pkg.installed:
8105                                                 show_invalid_depstring_notice(x,
8106                                                         pkg.metadata["RESTRICT"], str(e))
8107                                                 del e
8108                                                 return 1
8109                                         restrict = []
8110                                 if "ebuild" == pkg_type and x[3] != "nomerge" and \
8111                                         "fetch" in restrict:
8112                                         fetch = red("F")
8113                                         if ordered:
8114                                                 counters.restrict_fetch += 1
8115                                         if portdb.fetch_check(pkg_key, pkg_use):
8116                                                 fetch = green("f")
8117                                                 if ordered:
8118                                                         counters.restrict_fetch_satisfied += 1
8119
8120                                # We need to test for "--emptytree" here rather than the "empty" param, because the "empty"
8121                                # param is used for -u, where you still *do* want to see when something is being upgraded.
8122                                 myoldbest = []
8123                                 myinslotlist = None
8124                                 installed_versions = vardb.match(portage.cpv_getkey(pkg_key))
8125                                 if vardb.cpv_exists(pkg_key):
8126                                         addl="  "+yellow("R")+fetch+"  "
8127                                         if ordered:
8128                                                 if pkg_merge:
8129                                                         counters.reinst += 1
8130                                                 elif pkg_status == "uninstall":
8131                                                         counters.uninst += 1
8132                                 # filter out old-style virtual matches
8133                                 elif installed_versions and \
8134                                         portage.cpv_getkey(installed_versions[0]) == \
8135                                         portage.cpv_getkey(pkg_key):
8136                                         myinslotlist = vardb.match(pkg.slot_atom)
8137                                         # If this is the first install of a new-style virtual, we
8138                                         # need to filter out old-style virtual matches.
8139                                         if myinslotlist and \
8140                                                 portage.cpv_getkey(myinslotlist[0]) != \
8141                                                 portage.cpv_getkey(pkg_key):
8142                                                 myinslotlist = None
8143                                         if myinslotlist:
8144                                                 myoldbest = myinslotlist[:]
8145                                                 addl = "   " + fetch
8146                                                 if not portage.dep.cpvequal(pkg_key,
8147                                                         portage.best([pkg_key] + myoldbest)):
8148                                                         # Downgrade in slot
8149                                                         addl += turquoise("U")+blue("D")
8150                                                         if ordered:
8151                                                                 counters.downgrades += 1
8152                                                 else:
8153                                                         # Update in slot
8154                                                         addl += turquoise("U") + " "
8155                                                         if ordered:
8156                                                                 counters.upgrades += 1
8157                                         else:
8158                                                 # New slot, mark it new.
8159                                                 addl = " " + green("NS") + fetch + "  "
8160                                                 myoldbest = vardb.match(portage.cpv_getkey(pkg_key))
8161                                                 if ordered:
8162                                                         counters.newslot += 1
8163
8164                                         if "--changelog" in self.myopts:
8165                                                 inst_matches = vardb.match(pkg.slot_atom)
8166                                                 if inst_matches:
8167                                                         changelogs.extend(self.calc_changelog(
8168                                                                 portdb.findname(pkg_key),
8169                                                                 inst_matches[0], pkg_key))
8170                                 else:
8171                                         addl = " " + green("N") + " " + fetch + "  "
8172                                         if ordered:
8173                                                 counters.new += 1
8174
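                                     # Quick legend for the status letters assembled above (derived from
                                     # this code; not necessarily exhaustive): "N" new package, "NS" new
                                     # slot, "U" upgrade in slot, "UD" downgrade in slot, "R" reinstall,
                                     # "F"/"f" fetch-restricted (green "f" when portdb.fetch_check()
                                     # succeeds), and "I" (added further below) for interactive packages.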
8175                                 verboseadd = ""
8176                                 repoadd = None
8177
8178                                 if True:
8179                                         # USE flag display
8180                                         forced_flags = set()
8181                                         pkgsettings.setcpv(pkg) # for package.use.{mask,force}
8182                                         forced_flags.update(pkgsettings.useforce)
8183                                         forced_flags.update(pkgsettings.usemask)
8184
8185                                         cur_use = [flag for flag in pkg.use.enabled \
8186                                                 if flag in pkg.iuse.all]
8187                                         cur_iuse = sorted(pkg.iuse.all)
8188
8189                                         if myoldbest and myinslotlist:
8190                                                 previous_cpv = myoldbest[0]
8191                                         else:
8192                                                 previous_cpv = pkg.cpv
8193                                         if vardb.cpv_exists(previous_cpv):
8194                                                 old_iuse, old_use = vardb.aux_get(
8195                                                                 previous_cpv, ["IUSE", "USE"])
8196                                                 old_iuse = list(set(
8197                                                         filter_iuse_defaults(old_iuse.split())))
8198                                                 old_iuse.sort()
8199                                                 old_use = old_use.split()
8200                                                 is_new = False
8201                                         else:
8202                                                 old_iuse = []
8203                                                 old_use = []
8204                                                 is_new = True
8205
8206                                         old_use = [flag for flag in old_use if flag in old_iuse]
8207
8208                                         use_expand = pkgsettings["USE_EXPAND"].lower().split()
8209                                         use_expand.sort()
8210                                         use_expand.reverse()
8211                                         use_expand_hidden = \
8212                                                 pkgsettings["USE_EXPAND_HIDDEN"].lower().split()
8213
8214                                         def map_to_use_expand(myvals, forcedFlags=False,
8215                                                 removeHidden=True):
8216                                                 ret = {}
8217                                                 forced = {}
8218                                                 for exp in use_expand:
8219                                                         ret[exp] = []
8220                                                         forced[exp] = set()
8221                                                         for val in myvals[:]:
8222                                                                 if val.startswith(exp.lower()+"_"):
8223                                                                         if val in forced_flags:
8224                                                                                 forced[exp].add(val[len(exp)+1:])
8225                                                                         ret[exp].append(val[len(exp)+1:])
8226                                                                         myvals.remove(val)
8227                                                 ret["USE"] = myvals
8228                                                 forced["USE"] = [val for val in myvals \
8229                                                         if val in forced_flags]
8230                                                 if removeHidden:
8231                                                         for exp in use_expand_hidden:
8232                                                                 ret.pop(exp, None)
8233                                                 if forcedFlags:
8234                                                         return ret, forced
8235                                                 return ret
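                                             # Illustrative example of the mapping above (flag names are just
                                             # examples): with "video_cards" in use_expand and
                                             # myvals == ["video_cards_radeon", "alsa"], map_to_use_expand(myvals)
                                             # returns {"video_cards": ["radeon"], "USE": ["alsa"]}; with
                                             # forcedFlags=True it also returns the subset of those values that
                                             # appear in forced_flags.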
8236
8237                                         # Prevent USE_EXPAND_HIDDEN flags from being hidden if they
8238                                         # are the only thing that triggered reinstallation.
8239                                         reinst_flags_map = {}
8240                                         reinstall_for_flags = self._reinstall_nodes.get(pkg)
8241                                         reinst_expand_map = None
8242                                         if reinstall_for_flags:
8243                                                 reinst_flags_map = map_to_use_expand(
8244                                                         list(reinstall_for_flags), removeHidden=False)
8245                                                 for k in list(reinst_flags_map):
8246                                                         if not reinst_flags_map[k]:
8247                                                                 del reinst_flags_map[k]
8248                                                 if not reinst_flags_map.get("USE"):
8249                                                         reinst_expand_map = reinst_flags_map.copy()
8250                                                         reinst_expand_map.pop("USE", None)
8251                                         if reinst_expand_map and \
8252                                                 not set(reinst_expand_map).difference(
8253                                                 use_expand_hidden):
8254                                                 use_expand_hidden = \
8255                                                         set(use_expand_hidden).difference(
8256                                                         reinst_expand_map)
8257
8258                                         cur_iuse_map, iuse_forced = \
8259                                                 map_to_use_expand(cur_iuse, forcedFlags=True)
8260                                         cur_use_map = map_to_use_expand(cur_use)
8261                                         old_iuse_map = map_to_use_expand(old_iuse)
8262                                         old_use_map = map_to_use_expand(old_use)
8263
8264                                         use_expand.sort()
8265                                         use_expand.insert(0, "USE")
8266                                         
8267                                         for key in use_expand:
8268                                                 if key in use_expand_hidden:
8269                                                         continue
8270                                                 verboseadd += create_use_string(key.upper(),
8271                                                         cur_iuse_map[key], iuse_forced[key],
8272                                                         cur_use_map[key], old_iuse_map[key],
8273                                                         old_use_map[key], is_new,
8274                                                         reinst_flags_map.get(key))
8275
8276                                 if verbosity == 3:
8277                                         # size verbose
8278                                         mysize=0
8279                                         if pkg_type == "ebuild" and pkg_merge:
8280                                                 try:
8281                                                         myfilesdict = portdb.getfetchsizes(pkg_key,
8282                                                                 useflags=pkg_use, debug=self.edebug)
8283                                                 except portage.exception.InvalidDependString, e:
8284                                                         src_uri = portdb.aux_get(pkg_key, ["SRC_URI"])[0]
8285                                                         show_invalid_depstring_notice(x, src_uri, str(e))
8286                                                         del e
8287                                                         return 1
8288                                                 if myfilesdict is None:
8289                                                         myfilesdict="[empty/missing/bad digest]"
8290                                                 else:
8291                                                         for myfetchfile in myfilesdict:
8292                                                                 if myfetchfile not in myfetchlist:
8293                                                                         mysize+=myfilesdict[myfetchfile]
8294                                                                         myfetchlist.append(myfetchfile)
8295                                                         if ordered:
8296                                                                 counters.totalsize += mysize
8297                                                 verboseadd += format_size(mysize)
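                                                     # Note: myfetchlist appears to be shared across the whole merge
                                                     # list, so a distfile needed by several packages should only be
                                                     # counted once toward counters.totalsize.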
8298
8299                                         # overlay verbose
8300                                         # look for a previously installed version in the same slot and note its repository
8301                                         has_previous = False
8302                                         repo_name_prev = None
8303                                         slot_atom = "%s:%s" % (portage.dep_getkey(pkg_key),
8304                                                 metadata["SLOT"])
8305                                         slot_matches = vardb.match(slot_atom)
8306                                         if slot_matches:
8307                                                 has_previous = True
8308                                                 repo_name_prev = vardb.aux_get(slot_matches[0],
8309                                                         ["repository"])[0]
8310
8311                                         # now use the data to generate output
8312                                         if pkg.installed or not has_previous:
8313                                                 repoadd = repo_display.repoStr(repo_path_real)
8314                                         else:
8315                                                 repo_path_prev = None
8316                                                 if repo_name_prev:
8317                                                         repo_path_prev = portdb.getRepositoryPath(
8318                                                                 repo_name_prev)
8319                                                 if repo_path_prev == repo_path_real:
8320                                                         repoadd = repo_display.repoStr(repo_path_real)
8321                                                 else:
8322                                                         repoadd = "%s=>%s" % (
8323                                                                 repo_display.repoStr(repo_path_prev),
8324                                                                 repo_display.repoStr(repo_path_real))
8325                                         if repoadd:
8326                                                 repoadd_set.add(repoadd)
8327
8328                                 xs = [portage.cpv_getkey(pkg_key)] + \
8329                                         list(portage.catpkgsplit(pkg_key)[2:])
8330                                 if xs[2] == "r0":
8331                                         xs[2] = ""
8332                                 else:
8333                                         xs[2] = "-" + xs[2]
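                                     # e.g. for a hypothetical pkg_key "sys-apps/foo-1.2.3-r1", xs becomes
                                     # ["sys-apps/foo", "1.2.3", "-r1"], while an implicit "-r0" revision
                                     # collapses to "" so it is not displayed.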
8334
8335                                 mywidth = 130
8336                                 if "COLUMNWIDTH" in self.settings:
8337                                         try:
8338                                                 mywidth = int(self.settings["COLUMNWIDTH"])
8339                                         except ValueError, e:
8340                                                 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
8341                                                 portage.writemsg(
8342                                                         "!!! Unable to parse COLUMNWIDTH='%s'\n" % \
8343                                                         self.settings["COLUMNWIDTH"], noiselevel=-1)
8344                                                 del e
8345                                 oldlp = mywidth - 30
8346                                 newlp = oldlp - 30
8347
8348                                 # Convert myoldbest from a list to a string.
8349                                 if not myoldbest:
8350                                         myoldbest = ""
8351                                 else:
8352                                         for pos, key in enumerate(myoldbest):
8353                                                 key = portage.catpkgsplit(key)[2] + \
8354                                                         "-" + portage.catpkgsplit(key)[3]
8355                                                 if key[-3:] == "-r0":
8356                                                         key = key[:-3]
8357                                                 myoldbest[pos] = key
8358                                         myoldbest = blue("["+", ".join(myoldbest)+"]")
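                                     # e.g. a hypothetical myoldbest of ["sys-apps/foo-1.0-r1",
                                     # "sys-apps/foo-1.1"] becomes the colorized string "[1.0-r1, 1.1]".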
8359
8360                                 pkg_cp = xs[0]
8361                                 root_config = self.roots[myroot]
8362                                 system_set = root_config.sets["system"]
8363                                 world_set  = root_config.sets["world"]
8364
8365                                 pkg_system = False
8366                                 pkg_world = False
8367                                 try:
8368                                         pkg_system = system_set.findAtomForPackage(pkg)
8369                                         pkg_world  = world_set.findAtomForPackage(pkg)
8370                                         if not (oneshot or pkg_world) and \
8371                                                 myroot == self.target_root and \
8372                                                 favorites_set.findAtomForPackage(pkg):
8373                                                 # Maybe it will be added to world now.
8374                                                 if create_world_atom(pkg, favorites_set, root_config):
8375                                                         pkg_world = True
8376                                 except portage.exception.InvalidDependString:
8377                                         # This is reported elsewhere if relevant.
8378                                         pass
8379
8380                                 def pkgprint(pkg_str):
8381                                         if pkg_merge:
8382                                                 if pkg_system:
8383                                                         return colorize("PKG_MERGE_SYSTEM", pkg_str)
8384                                                 elif pkg_world:
8385                                                         return colorize("PKG_MERGE_WORLD", pkg_str)
8386                                                 else:
8387                                                         return colorize("PKG_MERGE", pkg_str)
8388                                         elif pkg_status == "uninstall":
8389                                                 return colorize("PKG_UNINSTALL", pkg_str)
8390                                         else:
8391                                                 if pkg_system:
8392                                                         return colorize("PKG_NOMERGE_SYSTEM", pkg_str)
8393                                                 elif pkg_world:
8394                                                         return colorize("PKG_NOMERGE_WORLD", pkg_str)
8395                                                 else:
8396                                                         return colorize("PKG_NOMERGE", pkg_str)
8397
8398                                 try:
8399                                         properties = flatten(use_reduce(paren_reduce(
8400                                                 pkg.metadata["PROPERTIES"]), uselist=pkg.use.enabled))
8401                                 except portage.exception.InvalidDependString, e:
8402                                         if not pkg.installed:
8403                                                 show_invalid_depstring_notice(pkg,
8404                                                         pkg.metadata["PROPERTIES"], str(e))
8405                                                 del e
8406                                                 return 1
8407                                         properties = []
8408                                 interactive = "interactive" in properties
8409                                 if interactive and pkg.operation == "merge":
8410                                         addl = colorize("WARN", "I") + addl[1:]
8411                                         if ordered:
8412                                                 counters.interactive += 1
8413
8414                                 if x[1]!="/":
8415                                         if myoldbest:
8416                                                 myoldbest +=" "
8417                                         if "--columns" in self.myopts:
8418                                                 if "--quiet" in self.myopts:
8419                                                         myprint=addl+" "+indent+pkgprint(pkg_cp)
8420                                                         myprint=myprint+darkblue(" "+xs[1]+xs[2])+" "
8421                                                         myprint=myprint+myoldbest
8422                                                         myprint=myprint+darkgreen("to "+x[1])
8423                                                         verboseadd = None
8424                                                 else:
8425                                                         if not pkg_merge:
8426                                                                 myprint = "[%s] %s%s" % \
8427                                                                         (pkgprint(pkg_status.ljust(13)),
8428                                                                         indent, pkgprint(pkg.cp))
8429                                                         else:
8430                                                                 myprint = "[%s %s] %s%s" % \
8431                                                                         (pkgprint(pkg.type_name), addl,
8432                                                                         indent, pkgprint(pkg.cp))
8433                                                         if (newlp-nc_len(myprint)) > 0:
8434                                                                 myprint=myprint+(" "*(newlp-nc_len(myprint)))
8435                                                         myprint=myprint+"["+darkblue(xs[1]+xs[2])+"] "
8436                                                         if (oldlp-nc_len(myprint)) > 0:
8437                                                                 myprint=myprint+" "*(oldlp-nc_len(myprint))
8438                                                         myprint=myprint+myoldbest
8439                                                         myprint += darkgreen("to " + pkg.root)
8440                                         else:
8441                                                 if not pkg_merge:
8442                                                         myprint = "[%s] " % pkgprint(pkg_status.ljust(13))
8443                                                 else:
8444                                                         myprint = "[%s %s] " % (pkgprint(pkg_type), addl)
8445                                                 myprint += indent + pkgprint(pkg_key) + " " + \
8446                                                         myoldbest + darkgreen("to " + myroot)
8447                                 else:
8448                                         if "--columns" in self.myopts:
8449                                                 if "--quiet" in self.myopts:
8450                                                         myprint=addl+" "+indent+pkgprint(pkg_cp)
8451                                                         myprint=myprint+" "+green(xs[1]+xs[2])+" "
8452                                                         myprint=myprint+myoldbest
8453                                                         verboseadd = None
8454                                                 else:
8455                                                         if not pkg_merge:
8456                                                                 myprint = "[%s] %s%s" % \
8457                                                                         (pkgprint(pkg_status.ljust(13)),
8458                                                                         indent, pkgprint(pkg.cp))
8459                                                         else:
8460                                                                 myprint = "[%s %s] %s%s" % \
8461                                                                         (pkgprint(pkg.type_name), addl,
8462                                                                         indent, pkgprint(pkg.cp))
8463                                                         if (newlp-nc_len(myprint)) > 0:
8464                                                                 myprint=myprint+(" "*(newlp-nc_len(myprint)))
8465                                                         myprint=myprint+green(" ["+xs[1]+xs[2]+"] ")
8466                                                         if (oldlp-nc_len(myprint)) > 0:
8467                                                                 myprint=myprint+(" "*(oldlp-nc_len(myprint)))
8468                                                         myprint += myoldbest
8469                                         else:
8470                                                 if not pkg_merge:
8471                                                         myprint = "[%s] %s%s %s" % \
8472                                                                 (pkgprint(pkg_status.ljust(13)),
8473                                                                 indent, pkgprint(pkg.cpv),
8474                                                                 myoldbest)
8475                                                 else:
8476                                                         myprint = "[%s %s] %s%s %s" % \
8477                                                                 (pkgprint(pkg_type), addl, indent,
8478                                                                 pkgprint(pkg.cpv), myoldbest)
8479
8480                                 if columns and pkg.operation == "uninstall":
8481                                         continue
8482                                 p.append((myprint, verboseadd, repoadd))
8483
8484                                 if "--tree" not in self.myopts and \
8485                                         "--quiet" not in self.myopts and \
8486                                         not self._opts_no_restart.intersection(self.myopts) and \
8487                                         pkg.root == self._running_root.root and \
8488                                         portage.match_from_list(
8489                                         portage.const.PORTAGE_PACKAGE_ATOM, [pkg]) and \
8490                                         not vardb.cpv_exists(pkg.cpv) and \
8491                                         "--quiet" not in self.myopts:
8492                                                 if mylist_index < len(mylist) - 1:
8493                                                         p.append(colorize("WARN", "*** Portage will stop merging at this point and reload itself,"))
8494                                                         p.append(colorize("WARN", "    then resume the merge."))
8495
8496                 out = sys.stdout
8497                 show_repos = repoadd_set and repoadd_set != set(["0"])
8498
8499                 for x in p:
8500                         if isinstance(x, basestring):
8501                                 out.write("%s\n" % (x,))
8502                                 continue
8503
8504                         myprint, verboseadd, repoadd = x
8505
8506                         if verboseadd:
8507                                 myprint += " " + verboseadd
8508
8509                         if show_repos and repoadd:
8510                                 myprint += " " + teal("[%s]" % repoadd)
8511
8512                         out.write("%s\n" % (myprint,))
8513
8514                 for x in blockers:
8515                         print x
8516
8517                 if verbosity == 3:
8518                         print
8519                         print counters
8520                         if show_repos:
8521                                 sys.stdout.write(str(repo_display))
8522
8523                 if "--changelog" in self.myopts:
8524                         print
8525                         for revision,text in changelogs:
8526                                 print bold('*'+revision)
8527                                 sys.stdout.write(text)
8528
8529                 sys.stdout.flush()
8530                 return os.EX_OK
8531
8532         def display_problems(self):
8533                 """
8534                 Display problems with the dependency graph such as slot collisions.
8535                 This is called internally by display() to show the problems _after_
8536                 the merge list where it is most likely to be seen, but if display()
8537                 is not going to be called then this method should be called explicitly
8538                 to ensure that the user is notified of problems with the graph.
8539
8540                 All output goes to stderr, except for unsatisfied dependencies which
8541                 go to stdout for parsing by programs such as autounmask.
8542                 """
8543
8544                 # Note that show_masked_packages() sends its output to
8545                 # stdout, and some programs such as autounmask parse the
8546                 # output in cases when emerge bails out. However, when
8547                 # show_masked_packages() is called for installed packages
8548                 # here, the message is a warning that is more appropriate
8549                 # to send to stderr, so temporarily redirect stdout to
8550                 # stderr. TODO: Fix output code so there's a cleaner way
8551                 # to redirect everything to stderr.
8552                 sys.stdout.flush()
8553                 sys.stderr.flush()
8554                 stdout = sys.stdout
8555                 try:
8556                         sys.stdout = sys.stderr
8557                         self._display_problems()
8558                 finally:
8559                         sys.stdout = stdout
8560                         sys.stdout.flush()
8561                         sys.stderr.flush()
8562
8563                 # This goes to stdout for parsing by programs like autounmask.
8564                 for pargs, kwargs in self._unsatisfied_deps_for_display:
8565                         self._show_unsatisfied_dep(*pargs, **kwargs)
8566
8567         def _display_problems(self):
8568                 if self._circular_deps_for_display is not None:
8569                         self._show_circular_deps(
8570                                 self._circular_deps_for_display)
8571
8572                 # The user is only notified of a slot conflict if
8573                 # there are no unresolvable blocker conflicts.
8574                 if self._unsatisfied_blockers_for_display is not None:
8575                         self._show_unsatisfied_blockers(
8576                                 self._unsatisfied_blockers_for_display)
8577                 else:
8578                         self._show_slot_collision_notice()
8579
8580                 # TODO: Add generic support for "set problem" handlers so that
8581                 # the below warnings aren't special cases for world only.
8582
8583                 if self._missing_args:
8584                         world_problems = False
8585                         if "world" in self._sets:
8586                                 # Filter out indirect members of world (from nested sets)
8587                                 # since only direct members of world are desired here.
8588                                 world_set = self.roots[self.target_root].sets["world"]
8589                                 for arg, atom in self._missing_args:
8590                                         if arg.name == "world" and atom in world_set:
8591                                                 world_problems = True
8592                                                 break
8593
8594                         if world_problems:
8595                                 sys.stderr.write("\n!!! Problems have been " + \
8596                                         "detected with your world file\n")
8597                                 sys.stderr.write("!!! Please run " + \
8598                                         green("emaint --check world")+"\n\n")
8599
8600                 if self._missing_args:
8601                         sys.stderr.write("\n" + colorize("BAD", "!!!") + \
8602                                 " Ebuilds for the following packages are either all\n")
8603                         sys.stderr.write(colorize("BAD", "!!!") + \
8604                                 " masked or don't exist:\n")
8605                         sys.stderr.write(" ".join(str(atom) for arg, atom in \
8606                                 self._missing_args) + "\n")
8607
8608                 if self._pprovided_args:
8609                         arg_refs = {}
8610                         for arg, atom in self._pprovided_args:
8611                                 if isinstance(arg, SetArg):
8612                                         parent = arg.name
8613                                         arg_atom = (atom, atom)
8614                                 else:
8615                                         parent = "args"
8616                                         arg_atom = (arg.arg, atom)
8617                                 refs = arg_refs.setdefault(arg_atom, [])
8618                                 if parent not in refs:
8619                                         refs.append(parent)
8620                         msg = []
8621                         msg.append(bad("\nWARNING: "))
8622                         if len(self._pprovided_args) > 1:
8623                                 msg.append("Requested packages will not be " + \
8624                                         "merged because they are listed in\n")
8625                         else:
8626                                 msg.append("A requested package will not be " + \
8627                                         "merged because it is listed in\n")
8628                         msg.append("package.provided:\n\n")
8629                         problems_sets = set()
8630                         for (arg, atom), refs in arg_refs.iteritems():
8631                                 ref_string = ""
8632                                 if refs:
8633                                         problems_sets.update(refs)
8634                                         refs.sort()
8635                                         ref_string = ", ".join(["'%s'" % name for name in refs])
8636                                         ref_string = " pulled in by " + ref_string
8637                                 msg.append("  %s%s\n" % (colorize("INFORM", str(arg)), ref_string))
8638                         msg.append("\n")
8639                         if "world" in problems_sets:
8640                                 msg.append("This problem can be solved in one of the following ways:\n\n")
8641                                 msg.append("  A) Use emaint to clean offending packages from world (if not installed).\n")
8642                                 msg.append("  B) Uninstall offending packages (cleans them from world).\n")
8643                                 msg.append("  C) Remove offending entries from package.provided.\n\n")
8644                                 msg.append("The best course of action depends on the reason that an offending\n")
8645                                 msg.append("package.provided entry exists.\n\n")
8646                         sys.stderr.write("".join(msg))
8647
8648                 masked_packages = []
8649                 for pkg in self._masked_installed:
8650                         root_config = pkg.root_config
8651                         pkgsettings = self.pkgsettings[pkg.root]
8652                         mreasons = get_masking_status(pkg, pkgsettings, root_config)
8653                         masked_packages.append((root_config, pkgsettings,
8654                                 pkg.cpv, pkg.metadata, mreasons))
8655                 if masked_packages:
8656                         sys.stderr.write("\n" + colorize("BAD", "!!!") + \
8657                                 " The following installed packages are masked:\n")
8658                         show_masked_packages(masked_packages)
8659                         show_mask_docs()
8660                         print
8661
8662         def calc_changelog(self,ebuildpath,current,next):
8663                 if ebuildpath == None or not os.path.exists(ebuildpath):
8664                         return []
8665                 current = '-'.join(portage.catpkgsplit(current)[1:])
8666                 if current.endswith('-r0'):
8667                         current = current[:-3]
8668                 next = '-'.join(portage.catpkgsplit(next)[1:])
8669                 if next.endswith('-r0'):
8670                         next = next[:-3]
8671                 changelogpath = os.path.join(os.path.split(ebuildpath)[0],'ChangeLog')
8672                 try:
8673                         changelog = open(changelogpath).read()
8674                 except SystemExit, e:
8675                         raise # Needed else can't exit
8676                 except:
8677                         return []
8678                 divisions = self.find_changelog_tags(changelog)
8679                 #print 'XX from',current,'to',next
8680                 #for div,text in divisions: print 'XX',div
8681                 # skip entries for all revisions above the one we are about to emerge
8682                 for i in range(len(divisions)):
8683                         if divisions[i][0]==next:
8684                                 divisions = divisions[i:]
8685                                 break
8686                 # find out how many entries we are going to display
8687                 for i in range(len(divisions)):
8688                         if divisions[i][0]==current:
8689                                 divisions = divisions[:i]
8690                                 break
8691                 else:
8692                         # Couldn't find the current revision in the list; display nothing.
8693                         return []
8694                 return divisions
8695
8696         def find_changelog_tags(self,changelog):
8697                 divs = []
8698                 release = None
8699                 while 1:
8700                         match = re.search(r'^\*\ ?([-a-zA-Z0-9_.+]*)(?:\ .*)?\n',changelog,re.M)
8701                         if match is None:
8702                                 if release is not None:
8703                                         divs.append((release,changelog))
8704                                 return divs
8705                         if release is not None:
8706                                 divs.append((release,changelog[:match.start()]))
8707                         changelog = changelog[match.end():]
8708                         release = match.group(1)
8709                         if release.endswith('.ebuild'):
8710                                 release = release[:-7]
8711                         if release.endswith('-r0'):
8712                                 release = release[:-3]
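                     # Illustrative sketch of the parsing above (hypothetical ChangeLog text):
                     #   "*foo-1.1 (01 Jan 2009)\n  fix bar\n*foo-1.0 (01 Dec 2008)\n  initial\n"
                     # yields [("foo-1.1", "  fix bar\n"), ("foo-1.0", "  initial\n")], with any
                     # trailing ".ebuild" or "-r0" suffix stripped from the release names.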
8713
8714         def saveNomergeFavorites(self):
8715                 """Find atoms in favorites that are not in the mergelist and add them
8716                 to the world file if necessary."""
8717                 for x in ("--buildpkgonly", "--fetchonly", "--fetch-all-uri",
8718                         "--oneshot", "--onlydeps", "--pretend"):
8719                         if x in self.myopts:
8720                                 return
8721                 root_config = self.roots[self.target_root]
8722                 world_set = root_config.sets["world"]
8723
8724                 world_locked = False
8725                 if hasattr(world_set, "lock"):
8726                         world_set.lock()
8727                         world_locked = True
8728
8729                 if hasattr(world_set, "load"):
8730                         world_set.load() # maybe it's changed on disk
8731
8732                 args_set = self._sets["args"]
8733                 portdb = self.trees[self.target_root]["porttree"].dbapi
8734                 added_favorites = set()
8735                 for x in self._set_nodes:
8736                         pkg_type, root, pkg_key, pkg_status = x
8737                         if pkg_status != "nomerge":
8738                                 continue
8739
8740                         try:
8741                                 myfavkey = create_world_atom(x, args_set, root_config)
8742                                 if myfavkey:
8743                                         if myfavkey in added_favorites:
8744                                                 continue
8745                                         added_favorites.add(myfavkey)
8746                         except portage.exception.InvalidDependString, e:
8747                                 writemsg("\n\n!!! '%s' has invalid PROVIDE: %s\n" % \
8748                                         (pkg_key, str(e)), noiselevel=-1)
8749                                 writemsg("!!! see '%s'\n\n" % os.path.join(
8750                                         root, portage.VDB_PATH, pkg_key, "PROVIDE"), noiselevel=-1)
8751                                 del e
8752                 all_added = []
8753                 for k in self._sets:
8754                         if k in ("args", "world") or not root_config.sets[k].world_candidate:
8755                                 continue
8756                         s = SETPREFIX + k
8757                         if s in world_set:
8758                                 continue
8759                         all_added.append(SETPREFIX + k)
8760                 all_added.extend(added_favorites)
8761                 all_added.sort()
8762                 for a in all_added:
8763                         print ">>> Recording %s in \"world\" favorites file..." % \
8764                                 colorize("INFORM", str(a))
8765                 if all_added:
8766                         world_set.update(all_added)
8767
8768                 if world_locked:
8769                         world_set.unlock()
8770
8771         def loadResumeCommand(self, resume_data, skip_masked=True,
8772                 skip_missing=True):
8773                 """
8774                 Add a resume command to the graph and validate it in the process.  This
8775                 will raise a PackageNotFound exception if a package is not available.
8776                 """
8777
8778                 if not isinstance(resume_data, dict):
8779                         return False
8780
8781                 mergelist = resume_data.get("mergelist")
8782                 if not isinstance(mergelist, list):
8783                         mergelist = []
8784
8785                 fakedb = self.mydbapi
8786                 trees = self.trees
8787                 serialized_tasks = []
8788                 masked_tasks = []
8789                 for x in mergelist:
8790                         if not (isinstance(x, list) and len(x) == 4):
8791                                 continue
8792                         pkg_type, myroot, pkg_key, action = x
8793                         if pkg_type not in self.pkg_tree_map:
8794                                 continue
8795                         if action != "merge":
8796                                 continue
8797                         tree_type = self.pkg_tree_map[pkg_type]
8798                         mydb = trees[myroot][tree_type].dbapi
8799                         db_keys = list(self._trees_orig[myroot][
8800                                 tree_type].dbapi._aux_cache_keys)
8801                         try:
8802                                 metadata = izip(db_keys, mydb.aux_get(pkg_key, db_keys))
8803                         except KeyError:
8804                                 # It does not exist or it is corrupt.
8805                                 if action == "uninstall":
8806                                         continue
8807                                 if skip_missing:
8808                                         # TODO: log these somewhere
8809                                         continue
8810                                 raise portage.exception.PackageNotFound(pkg_key)
8811                         installed = action == "uninstall"
8812                         built = pkg_type != "ebuild"
8813                         root_config = self.roots[myroot]
8814                         pkg = Package(built=built, cpv=pkg_key,
8815                                 installed=installed, metadata=metadata,
8816                                 operation=action, root_config=root_config,
8817                                 type_name=pkg_type)
8818                         if pkg_type == "ebuild":
8819                                 pkgsettings = self.pkgsettings[myroot]
8820                                 pkgsettings.setcpv(pkg)
8821                                 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
8822                                 pkg.metadata['CHOST'] = pkgsettings.get('CHOST', '')
8823                         self._pkg_cache[pkg] = pkg
8824
8825                         root_config = self.roots[pkg.root]
8826                         if "merge" == pkg.operation and \
8827                                 not visible(root_config.settings, pkg):
8828                                 if skip_masked:
8829                                         masked_tasks.append(Dependency(root=pkg.root, parent=pkg))
8830                                 else:
8831                                         self._unsatisfied_deps_for_display.append(
8832                                                 ((pkg.root, "="+pkg.cpv), {"myparent":None}))
8833
8834                         fakedb[myroot].cpv_inject(pkg)
8835                         serialized_tasks.append(pkg)
8836                         self.spinner.update()
8837
8838                 if self._unsatisfied_deps_for_display:
8839                         return False
8840
8841                 if not serialized_tasks or "--nodeps" in self.myopts:
8842                         self._serialized_tasks_cache = serialized_tasks
8843                         self._scheduler_graph = self.digraph
8844                 else:
8845                         self._select_package = self._select_pkg_from_graph
8846                         self.myparams.add("selective")
8847                         # Always traverse deep dependencies in order to account for
8848                         # potentially unsatisfied dependencies of installed packages.
8849                         # This is necessary for correct --keep-going or --resume operation
8850                         # in case a package from a group of circularly dependent packages
8851                         # fails. In this case, a package which has recently been installed
8852                         # may have an unsatisfied circular dependency (pulled in by
8853                         # PDEPEND, for example). So, even though a package is already
8854                         # installed, it may not have all of its dependencies satisfied, so
8855                         # it may not be usable. If such a package is in the subgraph of
8856                         # deep dependencies of a scheduled build, that build needs to
8857                         # be cancelled. In order for this type of situation to be
8858                         # recognized, deep traversal of dependencies is required.
8859                         self.myparams.add("deep")
8860
8861                         favorites = resume_data.get("favorites")
8862                         args_set = self._sets["args"]
8863                         if isinstance(favorites, list):
8864                                 args = self._load_favorites(favorites)
8865                         else:
8866                                 args = []
8867
8868                         for task in serialized_tasks:
8869                                 if isinstance(task, Package) and \
8870                                         task.operation == "merge":
8871                                         if not self._add_pkg(task, None):
8872                                                 return False
8873
8874                         # Packages for argument atoms need to be explicitly
8875                         # added via _add_pkg() so that they are included in the
8876                         # digraph (needed at least for --tree display).
8877                         for arg in args:
8878                                 for atom in arg.set:
8879                                         pkg, existing_node = self._select_package(
8880                                                 arg.root_config.root, atom)
8881                                         if existing_node is None and \
8882                                                 pkg is not None:
8883                                                 if not self._add_pkg(pkg, Dependency(atom=atom,
8884                                                         root=pkg.root, parent=arg)):
8885                                                         return False
8886
8887                         # Allow unsatisfied deps here to avoid showing a masking
8888                         # message for an unsatisfied dep that isn't necessarily
8889                         # masked.
8890                         if not self._create_graph(allow_unsatisfied=True):
8891                                 return False
8892
8893                         unsatisfied_deps = []
8894                         for dep in self._unsatisfied_deps:
8895                                 if not isinstance(dep.parent, Package):
8896                                         continue
8897                                 if dep.parent.operation == "merge":
8898                                         unsatisfied_deps.append(dep)
8899                                         continue
8900
8901                                 # For unsatisfied deps of installed packages, only account for
8902                                 # them if they are in the subgraph of dependencies of a package
8903                                 # which is scheduled to be installed.
8904                                 unsatisfied_install = False
8905                                 traversed = set()
8906                                 dep_stack = self.digraph.parent_nodes(dep.parent)
8907                                 while dep_stack:
8908                                         node = dep_stack.pop()
8909                                         if not isinstance(node, Package):
8910                                                 continue
8911                                         if node.operation == "merge":
8912                                                 unsatisfied_install = True
8913                                                 break
8914                                         if node in traversed:
8915                                                 continue
8916                                         traversed.add(node)
8917                                         dep_stack.extend(self.digraph.parent_nodes(node))
8918
8919                                 if unsatisfied_install:
8920                                         unsatisfied_deps.append(dep)
8921
8922                         if masked_tasks or unsatisfied_deps:
8923                                 # This probably means that a required package
8924                                 # was dropped via --skipfirst. It makes the
8925                                 # resume list invalid, so convert it to a
8926                                 # UnsatisfiedResumeDep exception.
8927                                 raise self.UnsatisfiedResumeDep(self,
8928                                         masked_tasks + unsatisfied_deps)
8929                         self._serialized_tasks_cache = None
8930                         try:
8931                                 self.altlist()
8932                         except self._unknown_internal_error:
8933                                 return False
8934
8935                 return True
8936
8937         def _load_favorites(self, favorites):
8938                 """
8939                 Use a list of favorites to resume state from a
8940                 previous select_files() call. This creates similar
8941                 DependencyArg instances to those that would have
8942                 been created by the original select_files() call.
8943                 This allows Package instances to be matched with
8944                 DependencyArg instances during graph creation.
8945                 """
8946                 root_config = self.roots[self.target_root]
8947                 getSetAtoms = root_config.setconfig.getSetAtoms
8948                 sets = root_config.sets
8949                 args = []
8950                 for x in favorites:
8951                         if not isinstance(x, basestring):
8952                                 continue
8953                         if x in ("system", "world"):
8954                                 x = SETPREFIX + x
8955                         if x.startswith(SETPREFIX):
8956                                 s = x[len(SETPREFIX):]
8957                                 if s not in sets:
8958                                         continue
8959                                 if s in self._sets:
8960                                         continue
8961                                 # Recursively expand sets so that containment tests in
8962                                 # self._get_parent_sets() properly match atoms in nested
8963                                 # sets (like if world contains system).
8964                                 expanded_set = InternalPackageSet(
8965                                         initial_atoms=getSetAtoms(s))
8966                                 self._sets[s] = expanded_set
8967                                 args.append(SetArg(arg=x, set=expanded_set,
8968                                         root_config=root_config))
8969                         else:
8970                                 if not portage.isvalidatom(x):
8971                                         continue
8972                                 args.append(AtomArg(arg=x, atom=x,
8973                                         root_config=root_config))
8974
8975                 self._set_args(args)
8976                 return args
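                     # Illustrative example (hypothetical favorites list): ["@myset",
                     # "app-editors/vim", "not-an-atom!"] yields a SetArg for "myset" (provided
                     # that set exists and is not already loaded) and an AtomArg for the plain
                     # atom; entries that are not valid atoms are silently skipped.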
8977
8978         class UnsatisfiedResumeDep(portage.exception.PortageException):
8979                 """
8980                 A dependency of a resume list is not installed. This
8981                 can occur when a required package is dropped from the
8982                 merge list via --skipfirst.
8983                 """
8984                 def __init__(self, depgraph, value):
8985                         portage.exception.PortageException.__init__(self, value)
8986                         self.depgraph = depgraph
8987
8988         class _internal_exception(portage.exception.PortageException):
8989                 def __init__(self, value=""):
8990                         portage.exception.PortageException.__init__(self, value)
8991
8992         class _unknown_internal_error(_internal_exception):
8993                 """
8994                 Used by the depgraph internally to terminate graph creation.
8995                 The specific reason for the failure should have been dumped
8996                 to stderr; unfortunately, the exact reason for the failure
8997                 may not be known.
8998                 """
8999
9000         class _serialize_tasks_retry(_internal_exception):
9001                 """
9002                 This is raised by the _serialize_tasks() method when it needs to
9003                 be called again for some reason. The only case that it's currently
9004                 used for is when neglected dependencies need to be added to the
9005                 graph in order to avoid making a potentially unsafe decision.
9006                 """
9007
9008         class _dep_check_composite_db(portage.dbapi):
9009                 """
9010                 A dbapi-like interface that is optimized for use in dep_check() calls.
9011                 This is built on top of the existing depgraph package selection logic.
9012                 Some packages that have been added to the graph may be masked from this
9013                 view in order to influence the atom preference selection that occurs
9014                 via dep_check().
9015                 """
9016                 def __init__(self, depgraph, root):
9017                         portage.dbapi.__init__(self)
9018                         self._depgraph = depgraph
9019                         self._root = root
9020                         self._match_cache = {}
9021                         self._cpv_pkg_map = {}
9022
9023                 def _clear_cache(self):
9024                         self._match_cache.clear()
9025                         self._cpv_pkg_map.clear()
9026
9027                 def match(self, atom):
9028                         ret = self._match_cache.get(atom)
9029                         if ret is not None:
9030                                 return ret[:]
9031                         orig_atom = atom
9032                         if "/" not in atom:
9033                                 atom = self._dep_expand(atom)
9034                         pkg, existing = self._depgraph._select_package(self._root, atom)
9035                         if not pkg:
9036                                 ret = []
9037                         else:
9038                                 # Return the highest available from select_package() as well as
9039                                 # any matching slots in the graph db.
9040                                 slots = set()
9041                                 slots.add(pkg.metadata["SLOT"])
9042                                 atom_cp = portage.dep_getkey(atom)
9043                                 if pkg.cp.startswith("virtual/"):
9044                                         # For new-style virtual lookahead that occurs inside
9045                                         # dep_check(), examine all slots. This is needed
9046                                         # so that newer slots will not unnecessarily be pulled in
9047                                         # when a satisfying lower slot is already installed. For
9048                                         # example, if virtual/jdk-1.4 is satisfied via kaffe then
9049                                         # there's no need to pull in a newer slot to satisfy a
9050                                         # virtual/jdk dependency.
9051                                         for db, pkg_type, built, installed, db_keys in \
9052                                                 self._depgraph._filtered_trees[self._root]["dbs"]:
9053                                                 for cpv in db.match(atom):
9054                                                         if portage.cpv_getkey(cpv) != pkg.cp:
9055                                                                 continue
9056                                                         slots.add(db.aux_get(cpv, ["SLOT"])[0])
9057                                 ret = []
9058                                 if self._visible(pkg):
9059                                         self._cpv_pkg_map[pkg.cpv] = pkg
9060                                         ret.append(pkg.cpv)
9061                                 slots.remove(pkg.metadata["SLOT"])
9062                                 while slots:
9063                                         slot_atom = "%s:%s" % (atom_cp, slots.pop())
9064                                         pkg, existing = self._depgraph._select_package(
9065                                                 self._root, slot_atom)
9066                                         if not pkg:
9067                                                 continue
9068                                         if not self._visible(pkg):
9069                                                 continue
9070                                         self._cpv_pkg_map[pkg.cpv] = pkg
9071                                         ret.append(pkg.cpv)
9072                                 if ret:
9073                                         self._cpv_sort_ascending(ret)
9074                         self._match_cache[orig_atom] = ret
9075                         return ret[:]
9076
9077                 def _visible(self, pkg):
9078                         if pkg.installed and "selective" not in self._depgraph.myparams:
9079                                 try:
9080                                         arg = self._depgraph._iter_atoms_for_pkg(pkg).next()
9081                                 except (StopIteration, portage.exception.InvalidDependString):
9082                                         arg = None
9083                                 if arg:
9084                                         return False
9085                         if pkg.installed:
9086                                 try:
9087                                         if not visible(
9088                                                 self._depgraph.pkgsettings[pkg.root], pkg):
9089                                                 return False
9090                                 except portage.exception.InvalidDependString:
9091                                         pass
9092                         in_graph = self._depgraph._slot_pkg_map[
9093                                 self._root].get(pkg.slot_atom)
9094                         if in_graph is None:
9095                                 # Mask choices for packages which are not the highest visible
9096                                 # version within their slot (since they usually trigger slot
9097                                 # conflicts).
9098                                 highest_visible, in_graph = self._depgraph._select_package(
9099                                         self._root, pkg.slot_atom)
9100                                 if pkg != highest_visible:
9101                                         return False
9102                         elif in_graph != pkg:
9103                                 # Mask choices for packages that would trigger a slot
9104                                 # conflict with a previously selected package.
9105                                 return False
9106                         return True
9107
9108                 def _dep_expand(self, atom):
9109                         """
9110                         This is only needed for old installed packages that may
9111                         contain atoms that are not fully qualified with a specific
9112                         category. Emulate the cpv_expand() function that's used by
9113                         dbapi.match() in cases like this. If there are multiple
9114                         matches, it's often due to a new-style virtual that has
9115                         been added, so try to filter those out to avoid raising
9116                         a ValueError.
9117                         """
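			# For example (hypothetical names): an unqualified atom such as
			# "foo" may expand to "app-misc/foo"; when nothing in the trees
			# matches but a virtual provides the name, it becomes
			# "virtual/foo", and otherwise the "null" category is used as a
			# placeholder.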
9118                         root_config = self._depgraph.roots[self._root]
9119                         orig_atom = atom
9120                         expanded_atoms = self._depgraph._dep_expand(root_config, atom)
9121                         if len(expanded_atoms) > 1:
9122                                 non_virtual_atoms = []
9123                                 for x in expanded_atoms:
9124                                         if not portage.dep_getkey(x).startswith("virtual/"):
9125                                                 non_virtual_atoms.append(x)
9126                                 if len(non_virtual_atoms) == 1:
9127                                         expanded_atoms = non_virtual_atoms
9128                         if len(expanded_atoms) > 1:
9129                                 # compatible with portage.cpv_expand()
9130                                 raise portage.exception.AmbiguousPackageName(
9131                                         [portage.dep_getkey(x) for x in expanded_atoms])
9132                         if expanded_atoms:
9133                                 atom = expanded_atoms[0]
9134                         else:
9135                                 null_atom = insert_category_into_atom(atom, "null")
9136                                 null_cp = portage.dep_getkey(null_atom)
9137                                 cat, atom_pn = portage.catsplit(null_cp)
9138                                 virts_p = root_config.settings.get_virts_p().get(atom_pn)
9139                                 if virts_p:
9140                                         # Allow the resolver to choose which virtual.
9141                                         atom = insert_category_into_atom(atom, "virtual")
9142                                 else:
9143                                         atom = insert_category_into_atom(atom, "null")
9144                         return atom
9145
9146                 def aux_get(self, cpv, wants):
9147                         metadata = self._cpv_pkg_map[cpv].metadata
9148                         return [metadata.get(x, "") for x in wants]
9149
9150 class RepoDisplay(object):
9151         def __init__(self, roots):
9152                 self._shown_repos = {}
9153                 self._unknown_repo = False
9154                 repo_paths = set()
9155                 for root_config in roots.itervalues():
9156                         portdir = root_config.settings.get("PORTDIR")
9157                         if portdir:
9158                                 repo_paths.add(portdir)
9159                         overlays = root_config.settings.get("PORTDIR_OVERLAY")
9160                         if overlays:
9161                                 repo_paths.update(overlays.split())
9162                 repo_paths = list(repo_paths)
9163                 self._repo_paths = repo_paths
9164                 self._repo_paths_real = [ os.path.realpath(repo_path) \
9165                         for repo_path in repo_paths ]
9166
9167                 # pre-allocate index for PORTDIR so that it always has index 0.
9168                 for root_config in roots.itervalues():
9169                         portdb = root_config.trees["porttree"].dbapi
9170                         portdir = portdb.porttree_root
9171                         if portdir:
9172                                 self.repoStr(portdir)
9173
9174         def repoStr(self, repo_path_real):
9175                 real_index = -1
9176                 if repo_path_real and repo_path_real in self._repo_paths_real:
9177                         real_index = self._repo_paths_real.index(repo_path_real)
9178                 if real_index == -1:
9179                         s = "?"
9180                         self._unknown_repo = True
9181                 else:
9182                         shown_repos = self._shown_repos
9183                         repo_paths = self._repo_paths
9184                         repo_path = repo_paths[real_index]
9185                         index = shown_repos.get(repo_path)
9186                         if index is None:
9187                                 index = len(shown_repos)
9188                                 shown_repos[repo_path] = index
9189                         s = str(index)
9190                 return s
9191
9192         def __str__(self):
9193                 output = []
9194                 shown_repos = self._shown_repos
9195                 unknown_repo = self._unknown_repo
9196                 if shown_repos or self._unknown_repo:
9197                         output.append("Portage tree and overlays:\n")
9198                 show_repo_paths = list(shown_repos)
9199                 for repo_path, repo_index in shown_repos.iteritems():
9200                         show_repo_paths[repo_index] = repo_path
9201                 if show_repo_paths:
9202                         for index, repo_path in enumerate(show_repo_paths):
9203                                 output.append(" "+teal("["+str(index)+"]")+" %s\n" % repo_path)
9204                 if unknown_repo:
9205                         output.append(" "+teal("[?]") + \
9206                                 " indicates that the source repository could not be determined\n")
9207                 return "".join(output)
9208
9209 class PackageCounters(object):
9210
9211         def __init__(self):
9212                 self.upgrades   = 0
9213                 self.downgrades = 0
9214                 self.new        = 0
9215                 self.newslot    = 0
9216                 self.reinst     = 0
9217                 self.uninst     = 0
9218                 self.blocks     = 0
9219                 self.blocks_satisfied         = 0
9220                 self.totalsize  = 0
9221                 self.restrict_fetch           = 0
9222                 self.restrict_fetch_satisfied = 0
9223                 self.interactive              = 0
9224
9225         def __str__(self):
9226                 total_installs = self.upgrades + self.downgrades + self.newslot + self.new + self.reinst
9227                 myoutput = []
9228                 details = []
9229                 myoutput.append("Total: %s package" % total_installs)
9230                 if total_installs != 1:
9231                         myoutput.append("s")
9232                 if total_installs != 0:
9233                         myoutput.append(" (")
9234                 if self.upgrades > 0:
9235                         details.append("%s upgrade" % self.upgrades)
9236                         if self.upgrades > 1:
9237                                 details[-1] += "s"
9238                 if self.downgrades > 0:
9239                         details.append("%s downgrade" % self.downgrades)
9240                         if self.downgrades > 1:
9241                                 details[-1] += "s"
9242                 if self.new > 0:
9243                         details.append("%s new" % self.new)
9244                 if self.newslot > 0:
9245                         details.append("%s in new slot" % self.newslot)
9246                         if self.newslot > 1:
9247                                 details[-1] += "s"
9248                 if self.reinst > 0:
9249                         details.append("%s reinstall" % self.reinst)
9250                         if self.reinst > 1:
9251                                 details[-1] += "s"
9252                 if self.uninst > 0:
9253                         details.append("%s uninstall" % self.uninst)
9254                         if self.uninst > 1:
9255                                 details[-1] += "s"
9256                 if self.interactive > 0:
9257                         details.append("%s %s" % (self.interactive,
9258                                 colorize("WARN", "interactive")))
9259                 myoutput.append(", ".join(details))
9260                 if total_installs != 0:
9261                         myoutput.append(")")
9262                 myoutput.append(", Size of downloads: %s" % format_size(self.totalsize))
9263                 if self.restrict_fetch:
9264                         myoutput.append("\nFetch Restriction: %s package" % \
9265                                 self.restrict_fetch)
9266                         if self.restrict_fetch > 1:
9267                                 myoutput.append("s")
9268                 if self.restrict_fetch_satisfied < self.restrict_fetch:
9269                         myoutput.append(bad(" (%s unsatisfied)") % \
9270                                 (self.restrict_fetch - self.restrict_fetch_satisfied))
9271                 if self.blocks > 0:
9272                         myoutput.append("\nConflict: %s block" % \
9273                                 self.blocks)
9274                         if self.blocks > 1:
9275                                 myoutput.append("s")
9276                         if self.blocks_satisfied < self.blocks:
9277                                 myoutput.append(bad(" (%s unsatisfied)") % \
9278                                         (self.blocks - self.blocks_satisfied))
9279                 return "".join(myoutput)
9280
9281 class UseFlagDisplay(object):
9282
9283         __slots__ = ('name', 'enabled', 'forced')
9284
9285         def __init__(self, name, enabled, forced):
9286                 self.name = name
9287                 self.enabled = enabled
9288                 self.forced = forced
9289
9290         def __str__(self):
9291                 s = self.name
9292                 if self.enabled:
9293                         s = red(s)
9294                 else:
9295                         s = '-' + s
9296                         s = blue(s)
9297                 if self.forced:
9298                         s = '(%s)' % s
9299                 return s
9300
9301         def _cmp_combined(a, b):
9302                 """
9303                 Sort by name, combining enabled and disabled flags.
9304                 """
9305                 return (a.name > b.name) - (a.name < b.name)
9306
9307         sort_combined = cmp_sort_key(_cmp_combined)
9308         del _cmp_combined
9309
9310         def _cmp_separated(a, b):
9311                 """
9312                 Sort by name, separating enabled flags from disabled flags.
9313                 """
9314                 enabled_diff = b.enabled - a.enabled
9315                 if enabled_diff:
9316                         return enabled_diff
9317                 return (a.name > b.name) - (a.name < b.name)
9318
9319         sort_separated = cmp_sort_key(_cmp_separated)
9320         del _cmp_separated
9321
9322 class PollSelectAdapter(PollConstants):
9323
9324         """
9325         Use select to emulate a poll object, for
9326         systems that don't support poll().
9327         """
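	# Illustrative usage sketch (not from the original source; "fd" is any
	# readable file descriptor):
	#
	#   poll_obj = PollSelectAdapter()
	#   poll_obj.register(fd, PollConstants.POLLIN)
	#   for fd, event in poll_obj.poll(1000):  # timeout in milliseconds
	#           ... handle the POLLIN event ...
	#   poll_obj.unregister(fd)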
9328
9329         def __init__(self):
9330                 self._registered = {}
9331                 self._select_args = [[], [], []]
9332
9333         def register(self, fd, *args):
9334                 """
9335                 Only POLLIN is currently supported!
9336                 """
9337                 if len(args) > 1:
9338                         raise TypeError(
9339                                 "register expected at most 2 arguments, got " + \
9340                                 repr(1 + len(args)))
9341
9342                 eventmask = PollConstants.POLLIN | \
9343                         PollConstants.POLLPRI | PollConstants.POLLOUT
9344                 if args:
9345                         eventmask = args[0]
9346
9347                 self._registered[fd] = eventmask
9348                 self._select_args = None
9349
9350         def unregister(self, fd):
9351                 self._select_args = None
9352                 del self._registered[fd]
9353
9354         def poll(self, *args):
9355                 if len(args) > 1:
9356                         raise TypeError(
9357                                 "poll expected at most 2 arguments, got " + \
9358                                 repr(1 + len(args)))
9359
9360                 timeout = None
9361                 if args:
9362                         timeout = args[0]
9363
9364                 select_args = self._select_args
9365                 if select_args is None:
9366                         select_args = [self._registered.keys(), [], []]
9367
9368                 if timeout is not None:
9369                         select_args = select_args[:]
9370                         # Translate poll() timeout args to select() timeout args:
9371                         #
9372                         #          | units        | value(s) for indefinite block
9373                         # ---------|--------------|------------------------------
9374                         #   poll   | milliseconds | omitted, negative, or None
9375                         # ---------|--------------|------------------------------
9376                         #   select | seconds      | omitted
9377                         # ---------|--------------|------------------------------
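			#
			# For example, a poll() timeout of 250 (milliseconds) should
			# become a select() timeout of 0.25 (seconds), hence the
			# float division below.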
9378
9379                         if timeout is not None and timeout < 0:
9380                                 timeout = None
9381                         if timeout is not None:
9382                                 select_args.append(timeout / 1000.0)
9383
9384                 select_events = select.select(*select_args)
9385                 poll_events = []
9386                 for fd in select_events[0]:
9387                         poll_events.append((fd, PollConstants.POLLIN))
9388                 return poll_events
9389
9390 class SequentialTaskQueue(SlotObject):
9391
9392         __slots__ = ("max_jobs", "running_tasks") + \
9393                 ("_dirty", "_scheduling", "_task_queue")
9394
9395         def __init__(self, **kwargs):
9396                 SlotObject.__init__(self, **kwargs)
9397                 self._task_queue = deque()
9398                 self.running_tasks = set()
9399                 if self.max_jobs is None:
9400                         self.max_jobs = 1
9401                 self._dirty = True
9402
9403         def add(self, task):
9404                 self._task_queue.append(task)
9405                 self._dirty = True
9406
9407         def addFront(self, task):
9408                 self._task_queue.appendleft(task)
9409                 self._dirty = True
9410
9411         def schedule(self):
9412
9413                 if not self._dirty:
9414                         return False
9415
9416                 if not self:
9417                         return False
9418
9419                 if self._scheduling:
9420                         # Ignore any recursive schedule() calls triggered via
9421                         # self._task_exit().
9422                         return False
9423
9424                 self._scheduling = True
9425
9426                 task_queue = self._task_queue
9427                 running_tasks = self.running_tasks
9428                 max_jobs = self.max_jobs
9429                 state_changed = False
9430
9431                 while task_queue and \
9432                         (max_jobs is True or len(running_tasks) < max_jobs):
9433                         task = task_queue.popleft()
9434                         cancelled = getattr(task, "cancelled", None)
9435                         if not cancelled:
9436                                 running_tasks.add(task)
9437                                 task.addExitListener(self._task_exit)
9438                                 task.start()
9439                         state_changed = True
9440
9441                 self._dirty = False
9442                 self._scheduling = False
9443
9444                 return state_changed
9445
9446         def _task_exit(self, task):
9447                 """
9448                 Since we can always rely on exit listeners being called, the set of
9449                 running tasks is always pruned automatically and there is never any need
9450                 to actively prune it.
9451                 """
9452                 self.running_tasks.remove(task)
9453                 if self._task_queue:
9454                         self._dirty = True
9455
9456         def clear(self):
9457                 self._task_queue.clear()
9458                 running_tasks = self.running_tasks
9459                 while running_tasks:
9460                         task = running_tasks.pop()
9461                         task.removeExitListener(self._task_exit)
9462                         task.cancel()
9463                 self._dirty = False
9464
9465         def __nonzero__(self):
9466                 return bool(self._task_queue or self.running_tasks)
9467
9468         def __len__(self):
9469                 return len(self._task_queue) + len(self.running_tasks)
9470
9471 _can_poll_device = None
9472
9473 def can_poll_device():
9474         """
9475         Test if it's possible to use poll() on a device such as a pty. This
9476         is known to fail on Darwin.
9477         @rtype: bool
9478         @returns: True if poll() on a device succeeds, False otherwise.
9479         """
9480
9481         global _can_poll_device
9482         if _can_poll_device is not None:
9483                 return _can_poll_device
9484
9485         if not hasattr(select, "poll"):
9486                 _can_poll_device = False
9487                 return _can_poll_device
9488
9489         try:
9490                 dev_null = open('/dev/null', 'rb')
9491         except IOError:
9492                 _can_poll_device = False
9493                 return _can_poll_device
9494
9495         p = select.poll()
9496         p.register(dev_null.fileno(), PollConstants.POLLIN)
9497
9498         invalid_request = False
9499         for f, event in p.poll():
9500                 if event & PollConstants.POLLNVAL:
9501                         invalid_request = True
9502                         break
9503         dev_null.close()
9504
9505         _can_poll_device = not invalid_request
9506         return _can_poll_device
9507
9508 def create_poll_instance():
9509         """
9510         Create an instance of select.poll, or an instance of
9511         PollSelectAdapter if there is no poll() implementation or
9512         it is broken somehow.
9513         """
9514         if can_poll_device():
9515                 return select.poll()
9516         return PollSelectAdapter()
9517
9518 getloadavg = getattr(os, "getloadavg", None)
9519 if getloadavg is None:
9520         def getloadavg():
9521                 """
9522                 Uses /proc/loadavg to emulate os.getloadavg().
9523                 Raises OSError if the load average was unobtainable.
9524                 """
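		# A typical /proc/loadavg line looks like:
		#   0.41 0.32 0.28 1/312 4912
		# Only the first three fields (the 1, 5 and 15 minute averages)
		# are used here.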
9525                 try:
9526                         loadavg_str = open('/proc/loadavg').readline()
9527                 except IOError:
9528                         # getloadavg() is only supposed to raise OSError, so convert
9529                         raise OSError('unknown')
9530                 loadavg_split = loadavg_str.split()
9531                 if len(loadavg_split) < 3:
9532                         raise OSError('unknown')
9533                 loadavg_floats = []
9534                 for i in xrange(3):
9535                         try:
9536                                 loadavg_floats.append(float(loadavg_split[i]))
9537                         except ValueError:
9538                                 raise OSError('unknown')
9539                 return tuple(loadavg_floats)
9540
9541 class PollScheduler(object):
9542
9543         class _sched_iface_class(SlotObject):
9544                 __slots__ = ("register", "schedule", "unregister")
9545
9546         def __init__(self):
9547                 self._max_jobs = 1
9548                 self._max_load = None
9549                 self._jobs = 0
9550                 self._poll_event_queue = []
9551                 self._poll_event_handlers = {}
9552                 self._poll_event_handler_ids = {}
9553                 # Increment id for each new handler.
9554                 self._event_handler_id = 0
9555                 self._poll_obj = create_poll_instance()
9556                 self._scheduling = False
9557
9558         def _schedule(self):
9559                 """
9560                 Calls _schedule_tasks() and automatically returns early from
9561                 any recursive calls to this method that the _schedule_tasks()
9562                 call might trigger. This makes _schedule() safe to call from
9563                 inside exit listeners.
9564                 """
9565                 if self._scheduling:
9566                         return False
9567                 self._scheduling = True
9568                 try:
9569                         return self._schedule_tasks()
9570                 finally:
9571                         self._scheduling = False
9572
9573         def _running_job_count(self):
9574                 return self._jobs
9575
9576         def _can_add_job(self):
9577                 max_jobs = self._max_jobs
9578                 max_load = self._max_load
9579
9580                 if self._max_jobs is not True and \
9581                         self._running_job_count() >= self._max_jobs:
9582                         return False
9583
9584                 if max_load is not None and \
9585                         (max_jobs is True or max_jobs > 1) and \
9586                         self._running_job_count() >= 1:
9587                         try:
9588                                 avg1, avg5, avg15 = getloadavg()
9589                         except OSError:
9590                                 return False
9591
9592                         if avg1 >= max_load:
9593                                 return False
9594
9595                 return True
9596
9597         def _poll(self, timeout=None):
9598                 """
9599                 All poll() calls pass through here. The poll events
9600                 are added directly to self._poll_event_queue.
9601                 In order to avoid endless blocking, this raises
9602                 StopIteration if timeout is None and there are
9603                 no file descriptors to poll.
9604                 """
9605                 if not self._poll_event_handlers:
9606                         self._schedule()
9607                         if timeout is None and \
9608                                 not self._poll_event_handlers:
9609                                 raise StopIteration(
9610                                         "timeout is None and there are no poll() event handlers")
9611
9612                 # The following error is known to occur with Linux kernel versions
9613                 # less than 2.6.24:
9614                 #
9615                 #   select.error: (4, 'Interrupted system call')
9616                 #
9617                 # This error has been observed after a SIGSTOP, followed by SIGCONT.
9618                 # Treat it similar to EAGAIN if timeout is None, otherwise just return
9619                 # Treat it like EAGAIN if timeout is None; otherwise just return
9620                 while True:
9621                         try:
9622                                 self._poll_event_queue.extend(self._poll_obj.poll(timeout))
9623                                 break
9624                         except select.error, e:
9625                                 writemsg_level("\n!!! select error: %s\n" % (e,),
9626                                         level=logging.ERROR, noiselevel=-1)
9627                                 del e
9628                                 if timeout is not None:
9629                                         break
9630
9631         def _next_poll_event(self, timeout=None):
9632                 """
9633                 Since the _schedule_wait() loop is called by event
9634                 handlers from _poll_loop(), maintain a central event
9635                 queue for both of them to share events from a single
9636                 poll() call. In order to avoid endless blocking, this
9637                 raises StopIteration if timeout is None and there are
9638                 no file descriptors to poll.
9639                 """
9640                 if not self._poll_event_queue:
9641                         self._poll(timeout)
9642                 return self._poll_event_queue.pop()
9643
9644         def _poll_loop(self):
9645
9646                 event_handlers = self._poll_event_handlers
9647                 event_handled = False
9648
9649                 try:
9650                         while event_handlers:
9651                                 f, event = self._next_poll_event()
9652                                 handler, reg_id = event_handlers[f]
9653                                 handler(f, event)
9654                                 event_handled = True
9655                 except StopIteration:
9656                         event_handled = True
9657
9658                 if not event_handled:
9659                         raise AssertionError("tight loop")
9660
9661         def _schedule_yield(self):
9662                 """
9663                 Schedule for a short period of time chosen by the scheduler based
9664                 on internal state. Synchronous tasks should call this periodically
9665                 in order to allow the scheduler to service pending poll events. The
9666                 scheduler will call poll() exactly once, without blocking, and any
9667                 resulting poll events will be serviced.
9668                 """
9669                 event_handlers = self._poll_event_handlers
9670                 events_handled = 0
9671
9672                 if not event_handlers:
9673                         return bool(events_handled)
9674
9675                 if not self._poll_event_queue:
9676                         self._poll(0)
9677
9678                 try:
9679                         while event_handlers and self._poll_event_queue:
9680                                 f, event = self._next_poll_event()
9681                                 handler, reg_id = event_handlers[f]
9682                                 handler(f, event)
9683                                 events_handled += 1
9684                 except StopIteration:
9685                         events_handled += 1
9686
9687                 return bool(events_handled)
9688
9689         def _register(self, f, eventmask, handler):
9690                 """
9691                 @rtype: Integer
9692                 @return: A unique registration id, for use in schedule() or
9693                         unregister() calls.
9694                 """
9695                 if f in self._poll_event_handlers:
9696                         raise AssertionError("fd %d is already registered" % f)
9697                 self._event_handler_id += 1
9698                 reg_id = self._event_handler_id
9699                 self._poll_event_handler_ids[reg_id] = f
9700                 self._poll_event_handlers[f] = (handler, reg_id)
9701                 self._poll_obj.register(f, eventmask)
9702                 return reg_id
9703
9704         def _unregister(self, reg_id):
9705                 f = self._poll_event_handler_ids[reg_id]
9706                 self._poll_obj.unregister(f)
9707                 del self._poll_event_handlers[f]
9708                 del self._poll_event_handler_ids[reg_id]
9709
9710         def _schedule_wait(self, wait_ids):
9711                 """
9712                 Schedule until the given wait ids are no longer registered
9713                 for poll() events.
9714                 @type wait_ids: int or collection of ints
9715                 @param wait_ids: registration id(s) to wait for
9716                 """
9717                 event_handlers = self._poll_event_handlers
9718                 handler_ids = self._poll_event_handler_ids
9719                 event_handled = False
9720
9721                 if isinstance(wait_ids, int):
9722                         wait_ids = frozenset([wait_ids])
9723
9724                 try:
9725                         while wait_ids.intersection(handler_ids):
9726                                 f, event = self._next_poll_event()
9727                                 handler, reg_id = event_handlers[f]
9728                                 handler(f, event)
9729                                 event_handled = True
9730                 except StopIteration:
9731                         event_handled = True
9732
9733                 return event_handled
9734
9735 class QueueScheduler(PollScheduler):
9736
9737         """
9738         Add instances of SequentialTaskQueue and then call run(). The
9739         run() method returns when no tasks remain.
9740         """
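	# A minimal usage sketch (the queue contents are hypothetical):
	#
	#   scheduler = QueueScheduler(max_jobs=2)
	#   scheduler.add(build_queue)
	#   scheduler.add(fetch_queue)
	#   scheduler.run()   # returns once all queued tasks have finished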
9741
9742         def __init__(self, max_jobs=None, max_load=None):
9743                 PollScheduler.__init__(self)
9744
9745                 if max_jobs is None:
9746                         max_jobs = 1
9747
9748                 self._max_jobs = max_jobs
9749                 self._max_load = max_load
9750                 self.sched_iface = self._sched_iface_class(
9751                         register=self._register,
9752                         schedule=self._schedule_wait,
9753                         unregister=self._unregister)
9754
9755                 self._queues = []
9756                 self._schedule_listeners = []
9757
9758         def add(self, q):
9759                 self._queues.append(q)
9760
9761         def remove(self, q):
9762                 self._queues.remove(q)
9763
9764         def run(self):
9765
9766                 while self._schedule():
9767                         self._poll_loop()
9768
9769                 while self._running_job_count():
9770                         self._poll_loop()
9771
9772         def _schedule_tasks(self):
9773                 """
9774                 @rtype: bool
9775                 @returns: True if there may be remaining tasks to schedule,
9776                         False otherwise.
9777                 """
9778                 while self._can_add_job():
9779                         n = self._max_jobs - self._running_job_count()
9780                         if n < 1:
9781                                 break
9782
9783                         if not self._start_next_job(n):
9784                                 return False
9785
9786                 for q in self._queues:
9787                         if q:
9788                                 return True
9789                 return False
9790
9791         def _running_job_count(self):
9792                 job_count = 0
9793                 for q in self._queues:
9794                         job_count += len(q.running_tasks)
9795                 self._jobs = job_count
9796                 return job_count
9797
9798         def _start_next_job(self, n=1):
9799                 started_count = 0
9800                 for q in self._queues:
9801                         initial_job_count = len(q.running_tasks)
9802                         q.schedule()
9803                         final_job_count = len(q.running_tasks)
9804                         if final_job_count > initial_job_count:
9805                                 started_count += (final_job_count - initial_job_count)
9806                         if started_count >= n:
9807                                 break
9808                 return started_count
9809
9810 class TaskScheduler(object):
9811
9812         """
9813         A simple way to handle scheduling of AsynchronousTask instances. Simply
9814         add tasks and call run(). The run() method returns when no tasks remain.
9815         """
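	# A minimal usage sketch (the task object is hypothetical; anything
	# with the AsynchronousTask interface should work):
	#
	#   task_scheduler = TaskScheduler(max_jobs=1)
	#   task_scheduler.add(some_async_task)
	#   task_scheduler.run()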
9816
9817         def __init__(self, max_jobs=None, max_load=None):
9818                 self._queue = SequentialTaskQueue(max_jobs=max_jobs)
9819                 self._scheduler = QueueScheduler(
9820                         max_jobs=max_jobs, max_load=max_load)
9821                 self.sched_iface = self._scheduler.sched_iface
9822                 self.run = self._scheduler.run
9823                 self._scheduler.add(self._queue)
9824
9825         def add(self, task):
9826                 self._queue.add(task)
9827
9828 class JobStatusDisplay(object):
9829
9830         _bound_properties = ("curval", "failed", "running")
9831         _jobs_column_width = 48
9832
9833         # Don't update the display unless at least this much
9834         # time has passed, in units of seconds.
9835         _min_display_latency = 2
9836
9837         _default_term_codes = {
9838                 'cr'  : '\r',
9839                 'el'  : '\x1b[K',
9840                 'nel' : '\n',
9841         }
9842
9843         _termcap_name_map = {
9844                 'carriage_return' : 'cr',
9845                 'clr_eol'         : 'el',
9846                 'newline'         : 'nel',
9847         }
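	# The keys above are terminfo "long" variable names and the values are
	# the corresponding capability names passed to curses.tigetstr().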
9848
9849         def __init__(self, out=sys.stdout, quiet=False):
9850                 object.__setattr__(self, "out", out)
9851                 object.__setattr__(self, "quiet", quiet)
9852                 object.__setattr__(self, "maxval", 0)
9853                 object.__setattr__(self, "merges", 0)
9854                 object.__setattr__(self, "_changed", False)
9855                 object.__setattr__(self, "_displayed", False)
9856                 object.__setattr__(self, "_last_display_time", 0)
9857                 object.__setattr__(self, "width", 80)
9858                 self.reset()
9859
9860                 isatty = hasattr(out, "isatty") and out.isatty()
9861                 object.__setattr__(self, "_isatty", isatty)
9862                 if not isatty or not self._init_term():
9863                         term_codes = {}
9864                         for k, capname in self._termcap_name_map.iteritems():
9865                                 term_codes[k] = self._default_term_codes[capname]
9866                         object.__setattr__(self, "_term_codes", term_codes)
9867                 encoding = sys.getdefaultencoding()
9868                 for k, v in self._term_codes.items():
9869                         if not isinstance(v, basestring):
9870                                 self._term_codes[k] = v.decode(encoding, 'replace')
9871
9872         def _init_term(self):
9873                 """
9874                 Initialize term control codes.
9875                 @rtype: bool
9876                 @returns: True if term codes were successfully initialized,
9877                         False otherwise.
9878                 """
9879
9880                 term_type = os.environ.get("TERM", "vt100")
9881                 tigetstr = None
9882
9883                 try:
9884                         import curses
9885                         try:
9886                                 curses.setupterm(term_type, self.out.fileno())
9887                                 tigetstr = curses.tigetstr
9888                         except curses.error:
9889                                 pass
9890                 except ImportError:
9891                         pass
9892
9893                 if tigetstr is None:
9894                         return False
9895
9896                 term_codes = {}
9897                 for k, capname in self._termcap_name_map.iteritems():
9898                         code = tigetstr(capname)
9899                         if code is None:
9900                                 code = self._default_term_codes[capname]
9901                         term_codes[k] = code
9902                 object.__setattr__(self, "_term_codes", term_codes)
9903                 return True
9904
9905         def _format_msg(self, msg):
9906                 return ">>> %s" % msg
9907
9908         def _erase(self):
9909                 self.out.write(
9910                         self._term_codes['carriage_return'] + \
9911                         self._term_codes['clr_eol'])
9912                 self.out.flush()
9913                 self._displayed = False
9914
9915         def _display(self, line):
9916                 self.out.write(line)
9917                 self.out.flush()
9918                 self._displayed = True
9919
9920         def _update(self, msg):
9921
9922                 out = self.out
9923                 if not self._isatty:
9924                         out.write(self._format_msg(msg) + self._term_codes['newline'])
9925                         self.out.flush()
9926                         self._displayed = True
9927                         return
9928
9929                 if self._displayed:
9930                         self._erase()
9931
9932                 self._display(self._format_msg(msg))
9933
9934         def displayMessage(self, msg):
9935
9936                 was_displayed = self._displayed
9937
9938                 if self._isatty and self._displayed:
9939                         self._erase()
9940
9941                 self.out.write(self._format_msg(msg) + self._term_codes['newline'])
9942                 self.out.flush()
9943                 self._displayed = False
9944
9945                 if was_displayed:
9946                         self._changed = True
9947                         self.display()
9948
9949         def reset(self):
9950                 self.maxval = 0
9951                 self.merges = 0
9952                 for name in self._bound_properties:
9953                         object.__setattr__(self, name, 0)
9954
9955                 if self._displayed:
9956                         self.out.write(self._term_codes['newline'])
9957                         self.out.flush()
9958                         self._displayed = False
9959
9960         def __setattr__(self, name, value):
9961                 old_value = getattr(self, name)
9962                 if value == old_value:
9963                         return
9964                 object.__setattr__(self, name, value)
9965                 if name in self._bound_properties:
9966                         self._property_change(name, old_value, value)
9967
9968         def _property_change(self, name, old_value, new_value):
9969                 self._changed = True
9970                 self.display()
9971
9972         def _load_avg_str(self):
9973                 try:
9974                         avg = getloadavg()
9975                 except OSError:
9976                         return 'unknown'
9977
9978                 max_avg = max(avg)
9979
9980                 if max_avg < 10:
9981                         digits = 2
9982                 elif max_avg < 100:
9983                         digits = 1
9984                 else:
9985                         digits = 0
9986
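		# For example, avg == (0.41, 0.32, 0.28) with digits == 2 is
		# rendered as "0.41, 0.32, 0.28".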
9987                 return ", ".join(("%%.%df" % digits ) % x for x in avg)
9988
9989         def display(self):
9990                 """
9991                 Display status on stdout, but only if something has
9992                 changed since the last call.
9993                 """
9994
9995                 if self.quiet:
9996                         return
9997
9998                 current_time = time.time()
9999                 time_delta = current_time - self._last_display_time
10000                 if self._displayed and \
10001                         not self._changed:
10002                         if not self._isatty:
10003                                 return
10004                         if time_delta < self._min_display_latency:
10005                                 return
10006
10007                 self._last_display_time = current_time
10008                 self._changed = False
10009                 self._display_status()
10010
10011         def _display_status(self):
10012                 # Don't use len(self._completed_tasks) here since that also
10013                 # can include uninstall tasks.
10014                 curval_str = str(self.curval)
10015                 maxval_str = str(self.maxval)
10016                 running_str = str(self.running)
10017                 failed_str = str(self.failed)
10018                 load_avg_str = self._load_avg_str()
10019
10020                 color_output = StringIO()
10021                 plain_output = StringIO()
10022                 style_file = portage.output.ConsoleStyleFile(color_output)
10023                 style_file.write_listener = plain_output
10024                 style_writer = portage.output.StyleWriter(file=style_file, maxcol=9999)
10025                 style_writer.style_listener = style_file.new_styles
10026                 f = formatter.AbstractFormatter(style_writer)
10027
10028                 number_style = "INFORM"
10029                 f.add_literal_data("Jobs: ")
10030                 f.push_style(number_style)
10031                 f.add_literal_data(curval_str)
10032                 f.pop_style()
10033                 f.add_literal_data(" of ")
10034                 f.push_style(number_style)
10035                 f.add_literal_data(maxval_str)
10036                 f.pop_style()
10037                 f.add_literal_data(" complete")
10038
10039                 if self.running:
10040                         f.add_literal_data(", ")
10041                         f.push_style(number_style)
10042                         f.add_literal_data(running_str)
10043                         f.pop_style()
10044                         f.add_literal_data(" running")
10045
10046                 if self.failed:
10047                         f.add_literal_data(", ")
10048                         f.push_style(number_style)
10049                         f.add_literal_data(failed_str)
10050                         f.pop_style()
10051                         f.add_literal_data(" failed")
10052
10053                 padding = self._jobs_column_width - len(plain_output.getvalue())
10054                 if padding > 0:
10055                         f.add_literal_data(padding * " ")
10056
10057                 f.add_literal_data("Load avg: ")
10058                 f.add_literal_data(load_avg_str)
10059
10060                 # Truncate to fit width, to avoid making the terminal scroll if the
10061                 # line overflows (happens when the load average is large).
10062                 plain_output = plain_output.getvalue()
10063                 if self._isatty and len(plain_output) > self.width:
10064                         # Use plain_output here since it's easier to truncate
10065                         # properly than the color output which contains console
10066                         # color codes.
10067                         self._update(plain_output[:self.width])
10068                 else:
10069                         self._update(color_output.getvalue())
10070
10071                 xtermTitle(" ".join(plain_output.split()))
10072
10073 class Scheduler(PollScheduler):
10074
10075         _opts_ignore_blockers = \
10076                 frozenset(["--buildpkgonly",
10077                 "--fetchonly", "--fetch-all-uri",
10078                 "--nodeps", "--pretend"])
10079
10080         _opts_no_background = \
10081                 frozenset(["--pretend",
10082                 "--fetchonly", "--fetch-all-uri"])
10083
10084         _opts_no_restart = frozenset(["--buildpkgonly",
10085                 "--fetchonly", "--fetch-all-uri", "--pretend"])
10086
10087         _bad_resume_opts = set(["--ask", "--changelog",
10088                 "--resume", "--skipfirst"])
10089
10090         _fetch_log = "/var/log/emerge-fetch.log"
10091
10092         class _iface_class(SlotObject):
10093                 __slots__ = ("dblinkEbuildPhase", "dblinkDisplayMerge",
10094                         "dblinkElog", "dblinkEmergeLog", "fetch", "register", "schedule",
10095                         "scheduleSetup", "scheduleUnpack", "scheduleYield",
10096                         "unregister")
10097
10098         class _fetch_iface_class(SlotObject):
10099                 __slots__ = ("log_file", "schedule")
10100
10101         _task_queues_class = slot_dict_class(
10102                 ("merge", "jobs", "fetch", "unpack"), prefix="")
10103
10104         class _build_opts_class(SlotObject):
10105                 __slots__ = ("buildpkg", "buildpkgonly",
10106                         "fetch_all_uri", "fetchonly", "pretend")
10107
10108         class _binpkg_opts_class(SlotObject):
10109                 __slots__ = ("fetchonly", "getbinpkg", "pretend")
10110
10111         class _pkg_count_class(SlotObject):
10112                 __slots__ = ("curval", "maxval")
10113
10114         class _emerge_log_class(SlotObject):
10115                 __slots__ = ("xterm_titles",)
10116
10117                 def log(self, *pargs, **kwargs):
10118                         if not self.xterm_titles:
10119                                 # Avoid interference with the scheduler's status display.
10120                                 kwargs.pop("short_msg", None)
10121                         emergelog(self.xterm_titles, *pargs, **kwargs)
10122
10123         class _failed_pkg(SlotObject):
10124                 __slots__ = ("build_dir", "build_log", "pkg", "returncode")
10125
10126         class _ConfigPool(object):
10127                 """Interface for a task to temporarily allocate a config
10128                 instance from a pool. This allows a task to be constructed
10129                 long before the config instance actually becomes needed, like
10130                 when prefetchers are constructed for the whole merge list."""
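		# Illustrative lifecycle (the calling task is hypothetical):
		#
		#   settings = config_pool.allocate()
		#   try:
		#           ... run a build task with this config instance ...
		#   finally:
		#           config_pool.deallocate(settings)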
10131                 __slots__ = ("_root", "_allocate", "_deallocate")
10132                 def __init__(self, root, allocate, deallocate):
10133                         self._root = root
10134                         self._allocate = allocate
10135                         self._deallocate = deallocate
10136                 def allocate(self):
10137                         return self._allocate(self._root)
10138                 def deallocate(self, settings):
10139                         self._deallocate(settings)
10140
10141         class _unknown_internal_error(portage.exception.PortageException):
10142                 """
10143                 Used internally to terminate scheduling. The specific reason for
10144                 the failure should have been dumped to stderr.
10145                 """
10146                 def __init__(self, value=""):
10147                         portage.exception.PortageException.__init__(self, value)
10148
10149         def __init__(self, settings, trees, mtimedb, myopts,
10150                 spinner, mergelist, favorites, digraph):
10151                 PollScheduler.__init__(self)
10152                 self.settings = settings
10153                 self.target_root = settings["ROOT"]
10154                 self.trees = trees
10155                 self.myopts = myopts
10156                 self._spinner = spinner
10157                 self._mtimedb = mtimedb
10158                 self._mergelist = mergelist
10159                 self._favorites = favorites
10160                 self._args_set = InternalPackageSet(favorites)
10161                 self._build_opts = self._build_opts_class()
10162                 for k in self._build_opts.__slots__:
10163                         setattr(self._build_opts, k, "--" + k.replace("_", "-") in myopts)
10164                 self._binpkg_opts = self._binpkg_opts_class()
10165                 for k in self._binpkg_opts.__slots__:
10166                         setattr(self._binpkg_opts, k, "--" + k.replace("_", "-") in myopts)
10167
10168                 self.curval = 0
10169                 self._logger = self._emerge_log_class()
10170                 self._task_queues = self._task_queues_class()
10171                 for k in self._task_queues.allowed_keys:
10172                         setattr(self._task_queues, k,
10173                                 SequentialTaskQueue())
10174
10175                 # Holds merges that will wait to be executed when no builds are
10176                 # executing. This is useful for system packages since dependencies
10177                 # on system packages are frequently unspecified.
10178                 self._merge_wait_queue = []
10179                 # Holds merges that have been transferred from the merge_wait_queue to
10180                 # the actual merge queue. They are removed from this list upon
10181                 # completion. Other packages can start building only when this list is
10182                 # empty.
10183                 self._merge_wait_scheduled = []
10184
10185                 # Holds system packages and their deep runtime dependencies. Before
10186                 # being merged, these packages go to merge_wait_queue, to be merged
10187                 # when no other packages are building.
10188                 self._deep_system_deps = set()
10189
10190                 # Holds packages to merge which will satisfy currently unsatisfied
10191                 # deep runtime dependencies of system packages. If this is not empty
10192                 # then no parallel builds will be spawned until it is empty. This
10193                 # minimizes the possibility that a build will fail due to the system
10194                 # being in a fragile state. For example, see bug #259954.
10195                 self._unsatisfied_system_deps = set()
10196
10197                 self._status_display = JobStatusDisplay()
10198                 self._max_load = myopts.get("--load-average")
10199                 max_jobs = myopts.get("--jobs")
10200                 if max_jobs is None:
10201                         max_jobs = 1
10202                 self._set_max_jobs(max_jobs)
10203
10204                 # The root where the currently running
10205                 # portage instance is installed.
10206                 self._running_root = trees["/"]["root_config"]
10207                 self.edebug = 0
10208                 if settings.get("PORTAGE_DEBUG", "") == "1":
10209                         self.edebug = 1
10210                 self.pkgsettings = {}
10211                 self._config_pool = {}
10212                 self._blocker_db = {}
10213                 for root in trees:
10214                         self._config_pool[root] = []
10215                         self._blocker_db[root] = BlockerDB(trees[root]["root_config"])
10216
10217                 fetch_iface = self._fetch_iface_class(log_file=self._fetch_log,
10218                         schedule=self._schedule_fetch)
10219                 self._sched_iface = self._iface_class(
10220                         dblinkEbuildPhase=self._dblink_ebuild_phase,
10221                         dblinkDisplayMerge=self._dblink_display_merge,
10222                         dblinkElog=self._dblink_elog,
10223                         dblinkEmergeLog=self._dblink_emerge_log,
10224                         fetch=fetch_iface, register=self._register,
10225                         schedule=self._schedule_wait,
10226                         scheduleSetup=self._schedule_setup,
10227                         scheduleUnpack=self._schedule_unpack,
10228                         scheduleYield=self._schedule_yield,
10229                         unregister=self._unregister)
10230
10231                 self._prefetchers = weakref.WeakValueDictionary()
10232                 self._pkg_queue = []
10233                 self._completed_tasks = set()
10234
10235                 self._failed_pkgs = []
10236                 self._failed_pkgs_all = []
10237                 self._failed_pkgs_die_msgs = []
10238                 self._post_mod_echo_msgs = []
10239                 self._parallel_fetch = False
10240                 merge_count = len([x for x in mergelist \
10241                         if isinstance(x, Package) and x.operation == "merge"])
10242                 self._pkg_count = self._pkg_count_class(
10243                         curval=0, maxval=merge_count)
10244                 self._status_display.maxval = self._pkg_count.maxval
10245
10246                 # The load average takes some time to respond when new
10247                 # jobs are added, so we need to limit the rate of adding
10248                 # new jobs.
10249                 self._job_delay_max = 10
10250                 self._job_delay_factor = 1.0
10251                 self._job_delay_exp = 1.5
10252                 self._previous_job_start_time = None
10253
10254                 self._set_digraph(digraph)
10255
10256                 # This is used to memoize the _choose_pkg() result when
10257                 # no packages can be chosen until one of the existing
10258                 # jobs completes.
10259                 self._choose_pkg_return_early = False
10260
10261                 features = self.settings.features
10262                 if "parallel-fetch" in features and \
10263                         not ("--pretend" in self.myopts or \
10264                         "--fetch-all-uri" in self.myopts or \
10265                         "--fetchonly" in self.myopts):
10266                         if "distlocks" not in features:
10267                                 portage.writemsg(red("!!!")+"\n", noiselevel=-1)
10268                                 portage.writemsg(red("!!!")+" parallel-fetching " + \
10269                                         "requires the distlocks feature to be enabled"+"\n",
10270                                         noiselevel=-1)
10271                                 portage.writemsg(red("!!!")+" you have it disabled, " + \
10272                                         "thus parallel-fetching is being disabled"+"\n",
10273                                         noiselevel=-1)
10274                                 portage.writemsg(red("!!!")+"\n", noiselevel=-1)
10275                         elif len(mergelist) > 1:
10276                                 self._parallel_fetch = True
10277
10278                 if self._parallel_fetch:
10279                         # clear out existing fetch log if it exists
10280                         try:
10281                                 open(self._fetch_log, 'w')
10282                         except EnvironmentError:
10283                                 pass
10284
10285                 self._running_portage = None
10286                 portage_match = self._running_root.trees["vartree"].dbapi.match(
10287                         portage.const.PORTAGE_PACKAGE_ATOM)
10288                 if portage_match:
10289                         cpv = portage_match.pop()
10290                         self._running_portage = self._pkg(cpv, "installed",
10291                                 self._running_root, installed=True)
10292
10293         def _poll(self, timeout=None):
10294                 self._schedule()
10295                 PollScheduler._poll(self, timeout=timeout)
10296
10297         def _set_max_jobs(self, max_jobs):
10298                 self._max_jobs = max_jobs
10299                 self._task_queues.jobs.max_jobs = max_jobs
10300
10301         def _background_mode(self):
10302                 """
10303                 Check if background mode is enabled and adjust states as necessary.
10304
10305                 @rtype: bool
10306                 @returns: True if background mode is enabled, False otherwise.
10307                 """
10308                 background = (self._max_jobs is True or \
10309                         self._max_jobs > 1 or "--quiet" in self.myopts) and \
10310                         not bool(self._opts_no_background.intersection(self.myopts))
10311
10312                 if background:
10313                         interactive_tasks = self._get_interactive_tasks()
10314                         if interactive_tasks:
10315                                 background = False
10316                                 writemsg_level(">>> Sending package output to stdio due " + \
10317                                         "to interactive package(s):\n",
10318                                         level=logging.INFO, noiselevel=-1)
10319                                 msg = [""]
10320                                 for pkg in interactive_tasks:
10321                                         pkg_str = "  " + colorize("INFORM", str(pkg.cpv))
10322                                         if pkg.root != "/":
10323                                                 pkg_str += " for " + pkg.root
10324                                         msg.append(pkg_str)
10325                                 msg.append("")
10326                                 writemsg_level("".join("%s\n" % (l,) for l in msg),
10327                                         level=logging.INFO, noiselevel=-1)
10328                                 if self._max_jobs is True or self._max_jobs > 1:
10329                                         self._set_max_jobs(1)
10330                                         writemsg_level(">>> Setting --jobs=1 due " + \
10331                                                 "to the above interactive package(s)\n",
10332                                                 level=logging.INFO, noiselevel=-1)
10333
10334                 self._status_display.quiet = \
10335                         not background or \
10336                         ("--quiet" in self.myopts and \
10337                         "--verbose" not in self.myopts)
10338
10339                 self._logger.xterm_titles = \
10340                         "notitles" not in self.settings.features and \
10341                         self._status_display.quiet
10342
10343                 return background
10344
10345         def _get_interactive_tasks(self):
10346                 from portage import flatten
10347                 from portage.dep import use_reduce, paren_reduce
10348                 interactive_tasks = []
10349                 for task in self._mergelist:
10350                         if not (isinstance(task, Package) and \
10351                                 task.operation == "merge"):
10352                                 continue
10353                         try:
10354                                 properties = flatten(use_reduce(paren_reduce(
10355                                         task.metadata["PROPERTIES"]), uselist=task.use.enabled))
10356                         except portage.exception.InvalidDependString, e:
10357                                 show_invalid_depstring_notice(task,
10358                                         task.metadata["PROPERTIES"], str(e))
10359                                 raise self._unknown_internal_error()
10360                         if "interactive" in properties:
10361                                 interactive_tasks.append(task)
10362                 return interactive_tasks
10363
10364         def _set_digraph(self, digraph):
10365                 if "--nodeps" in self.myopts or \
10366                         (self._max_jobs is not True and self._max_jobs < 2):
10367                         # save some memory
10368                         self._digraph = None
10369                         return
10370
10371                 self._digraph = digraph
10372                 self._find_system_deps()
10373                 self._prune_digraph()
10374                 self._prevent_builddir_collisions()
10375
10376         def _find_system_deps(self):
10377                 """
10378                 Find system packages and their deep runtime dependencies. Before being
10379                 merged, these packages go to merge_wait_queue, to be merged when no
10380                 other packages are building.
10381                 """
10382                 deep_system_deps = self._deep_system_deps
10383                 deep_system_deps.clear()
10384                 deep_system_deps.update(
10385                         _find_deep_system_runtime_deps(self._digraph))
10386                 deep_system_deps.difference_update([pkg for pkg in \
10387                         deep_system_deps if pkg.operation != "merge"])
10388
10389         def _prune_digraph(self):
10390                 """
10391                 Prune any root nodes that are irrelevant.
10392                 """
10393
10394                 graph = self._digraph
10395                 completed_tasks = self._completed_tasks
10396                 removed_nodes = set()
10397                 while True:
10398                         for node in graph.root_nodes():
10399                                 if not isinstance(node, Package) or \
10400                                         (node.installed and node.operation == "nomerge") or \
10401                                         node.onlydeps or \
10402                                         node in completed_tasks:
10403                                         removed_nodes.add(node)
10404                         if removed_nodes:
10405                                 graph.difference_update(removed_nodes)
10406                         if not removed_nodes:
10407                                 break
10408                         removed_nodes.clear()
10409
10410         def _prevent_builddir_collisions(self):
10411                 """
10412                 When building stages, sometimes the same exact cpv needs to be merged
10413                 to both $ROOTs. Add edges to the digraph in order to avoid collisions
10414                 in the builddir. Currently, normal file locks would be inappropriate
10415                 for this purpose since emerge holds all of its build dir locks from
10416                 the main process.
10417                 """
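                      # For example, when the same cpv appears in the merge list once for
                      # "/" and once for a secondary $ROOT, the buildtime edge added below
                      # makes the later instance wait for the earlier one instead of
                      # sharing its builddir.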
10418                 cpv_map = {}
10419                 for pkg in self._mergelist:
10420                         if not isinstance(pkg, Package):
10421                                 # a satisfied blocker
10422                                 continue
10423                         if pkg.installed:
10424                                 continue
10425                         if pkg.cpv not in cpv_map:
10426                                 cpv_map[pkg.cpv] = [pkg]
10427                                 continue
10428                         for earlier_pkg in cpv_map[pkg.cpv]:
10429                                 self._digraph.add(earlier_pkg, pkg,
10430                                         priority=DepPriority(buildtime=True))
10431                         cpv_map[pkg.cpv].append(pkg)
10432
10433         class _pkg_failure(portage.exception.PortageException):
10434                 """
10435                 An instance of this class is raised by unmerge() when
10436                 an uninstallation fails.
10437                 """
10438                 status = 1
10439                 def __init__(self, *pargs):
10440                         portage.exception.PortageException.__init__(self, pargs)
10441                         if pargs:
10442                                 self.status = pargs[0]
10443
10444         def _schedule_fetch(self, fetcher):
10445                 """
10446                 Schedule a fetcher on the fetch queue, in order to
10447                 serialize access to the fetch log.
10448                 """
10449                 self._task_queues.fetch.addFront(fetcher)
10450
10451         def _schedule_setup(self, setup_phase):
10452                 """
10453                 Schedule a setup phase on the merge queue, in order to
10454                 serialize unsandboxed access to the live filesystem.
10455                 """
10456                 self._task_queues.merge.addFront(setup_phase)
10457                 self._schedule()
10458
10459         def _schedule_unpack(self, unpack_phase):
10460                 """
10461                 Schedule an unpack phase on the unpack queue, in order
10462                 to serialize $DISTDIR access for live ebuilds.
10463                 """
10464                 self._task_queues.unpack.add(unpack_phase)
10465
10466         def _find_blockers(self, new_pkg):
10467                 """
10468                 Returns a callable which should be called only when
10469                 the vdb lock has been acquired.
10470                 """
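                      # Returning a closure defers the (potentially expensive) lookup in
                      # _find_blockers_with_lock() until the caller actually holds the
                      # vdb lock.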
10471                 def get_blockers():
10472                         return self._find_blockers_with_lock(new_pkg, acquire_lock=0)
10473                 return get_blockers
10474
10475         def _find_blockers_with_lock(self, new_pkg, acquire_lock=0):
10476                 if self._opts_ignore_blockers.intersection(self.myopts):
10477                         return None
10478
10479                 # Call gc.collect() here to avoid heap overflow that
10480                 # triggers 'Cannot allocate memory' errors (reported
10481                 # with python-2.5).
10482                 import gc
10483                 gc.collect()
10484
10485                 blocker_db = self._blocker_db[new_pkg.root]
10486
10487                 blocker_dblinks = []
10488                 for blocking_pkg in blocker_db.findInstalledBlockers(
10489                         new_pkg, acquire_lock=acquire_lock):
10490                         if new_pkg.slot_atom == blocking_pkg.slot_atom:
10491                                 continue
10492                         if new_pkg.cpv == blocking_pkg.cpv:
10493                                 continue
10494                         blocker_dblinks.append(portage.dblink(
10495                                 blocking_pkg.category, blocking_pkg.pf, blocking_pkg.root,
10496                                 self.pkgsettings[blocking_pkg.root], treetype="vartree",
10497                                 vartree=self.trees[blocking_pkg.root]["vartree"]))
10498
10499                 gc.collect()
10500
10501                 return blocker_dblinks
10502
10503         def _dblink_pkg(self, pkg_dblink):
10504                 cpv = pkg_dblink.mycpv
10505                 type_name = RootConfig.tree_pkg_map[pkg_dblink.treetype]
10506                 root_config = self.trees[pkg_dblink.myroot]["root_config"]
10507                 installed = type_name == "installed"
10508                 return self._pkg(cpv, type_name, root_config, installed=installed)
10509
10510         def _append_to_log_path(self, log_path, msg):
10511                 f = open(log_path, 'a')
10512                 try:
10513                         f.write(msg)
10514                 finally:
10515                         f.close()
10516
10517         def _dblink_elog(self, pkg_dblink, phase, func, msgs):
10518
10519                 log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
10520                 log_file = None
10521                 out = sys.stdout
10522                 background = self._background
10523
10524                 if background and log_path is not None:
10525                         log_file = open(log_path, 'a')
10526                         out = log_file
10527
10528                 try:
10529                         for msg in msgs:
10530                                 func(msg, phase=phase, key=pkg_dblink.mycpv, out=out)
10531                 finally:
10532                         if log_file is not None:
10533                                 log_file.close()
10534
10535         def _dblink_emerge_log(self, msg):
10536                 self._logger.log(msg)
10537
10538         def _dblink_display_merge(self, pkg_dblink, msg, level=0, noiselevel=0):
10539                 log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
10540                 background = self._background
10541
10542                 if log_path is None:
10543                         if not (background and level < logging.WARN):
10544                                 portage.util.writemsg_level(msg,
10545                                         level=level, noiselevel=noiselevel)
10546                 else:
10547                         if not background:
10548                                 portage.util.writemsg_level(msg,
10549                                         level=level, noiselevel=noiselevel)
10550                         self._append_to_log_path(log_path, msg)
10551
10552         def _dblink_ebuild_phase(self,
10553                 pkg_dblink, pkg_dbapi, ebuild_path, phase):
10554                 """
10555                 Using this callback for merge phases allows the scheduler
10556                 to run while these phases execute asynchronously, and allows
10557                 the scheduler to control output handling.
10558                 """
10559
10560                 scheduler = self._sched_iface
10561                 settings = pkg_dblink.settings
10562                 pkg = self._dblink_pkg(pkg_dblink)
10563                 background = self._background
10564                 log_path = settings.get("PORTAGE_LOG_FILE")
10565
10566                 ebuild_phase = EbuildPhase(background=background,
10567                         pkg=pkg, phase=phase, scheduler=scheduler,
10568                         settings=settings, tree=pkg_dblink.treetype)
10569                 ebuild_phase.start()
10570                 ebuild_phase.wait()
10571
10572                 return ebuild_phase.returncode
10573
10574         def _generate_digests(self):
10575                 """
10576                 Generate digests if necessary for --digest or FEATURES=digest.
10577                 In order to avoid interference, this must be done before parallel
10578                 tasks are started.
10579                 """
10580
10581                 if '--fetchonly' in self.myopts:
10582                         return os.EX_OK
10583
10584                 digest = '--digest' in self.myopts
10585                 if not digest:
10586                         for pkgsettings in self.pkgsettings.itervalues():
10587                                 if 'digest' in pkgsettings.features:
10588                                         digest = True
10589                                         break
10590
10591                 if not digest:
10592                         return os.EX_OK
10593
10594                 for x in self._mergelist:
10595                         if not isinstance(x, Package) or \
10596                                 x.type_name != 'ebuild' or \
10597                                 x.operation != 'merge':
10598                                 continue
10599                         pkgsettings = self.pkgsettings[x.root]
10600                         if '--digest' not in self.myopts and \
10601                                 'digest' not in pkgsettings.features:
10602                                 continue
10603                         portdb = x.root_config.trees['porttree'].dbapi
10604                         ebuild_path = portdb.findname(x.cpv)
10605                         if not ebuild_path:
10606                                 writemsg_level(
10607                                         "!!! Could not locate ebuild for '%s'.\n" \
10608                                         % x.cpv, level=logging.ERROR, noiselevel=-1)
10609                                 return 1
10610                         pkgsettings['O'] = os.path.dirname(ebuild_path)
10611                         if not portage.digestgen([], pkgsettings, myportdb=portdb):
10612                                 writemsg_level(
10613                                         "!!! Unable to generate manifest for '%s'.\n" \
10614                                         % x.cpv, level=logging.ERROR, noiselevel=-1)
10615                                 return 1
10616
10617                 return os.EX_OK
10618
10619         def _check_manifests(self):
10620                 # Verify all the manifests now so that the user is notified of failure
10621                 # as soon as possible.
10622                 if "strict" not in self.settings.features or \
10623                         "--fetchonly" in self.myopts or \
10624                         "--fetch-all-uri" in self.myopts:
10625                         return os.EX_OK
10626
10627                 shown_verifying_msg = False
10628                 quiet_settings = {}
10629                 for myroot, pkgsettings in self.pkgsettings.iteritems():
10630                         quiet_config = portage.config(clone=pkgsettings)
10631                         quiet_config["PORTAGE_QUIET"] = "1"
10632                         quiet_config.backup_changes("PORTAGE_QUIET")
10633                         quiet_settings[myroot] = quiet_config
10634                         del quiet_config
10635
10636                 for x in self._mergelist:
10637                         if not isinstance(x, Package) or \
10638                                 x.type_name != "ebuild":
10639                                 continue
10640
10641                         if not shown_verifying_msg:
10642                                 shown_verifying_msg = True
10643                                 self._status_msg("Verifying ebuild manifests")
10644
10645                         root_config = x.root_config
10646                         portdb = root_config.trees["porttree"].dbapi
10647                         quiet_config = quiet_settings[root_config.root]
10648                         quiet_config["O"] = os.path.dirname(portdb.findname(x.cpv))
10649                         if not portage.digestcheck([], quiet_config, strict=True):
10650                                 return 1
10651
10652                 return os.EX_OK
10653
10654         def _add_prefetchers(self):
10655
10656                 if not self._parallel_fetch:
10657                         return
10658
10659                 if self._parallel_fetch:
10660                         self._status_msg("Starting parallel fetch")
10661
10662                         prefetchers = self._prefetchers
10663                         getbinpkg = "--getbinpkg" in self.myopts
10664
10665                         # In order to avoid "waiting for lock" messages
10666                         # at the beginning, which annoy users, never
10667                         # spawn a prefetcher for the first package.
10668                         for pkg in self._mergelist[1:]:
10669                                 prefetcher = self._create_prefetcher(pkg)
10670                                 if prefetcher is not None:
10671                                         self._task_queues.fetch.add(prefetcher)
10672                                         prefetchers[pkg] = prefetcher
10673
10674         def _create_prefetcher(self, pkg):
10675                 """
10676                 @return: a prefetcher, or None if not applicable
10677                 """
10678                 prefetcher = None
10679
10680                 if not isinstance(pkg, Package):
10681                         pass
10682
10683                 elif pkg.type_name == "ebuild":
10684
10685                         prefetcher = EbuildFetcher(background=True,
10686                                 config_pool=self._ConfigPool(pkg.root,
10687                                 self._allocate_config, self._deallocate_config),
10688                                 fetchonly=1, logfile=self._fetch_log,
10689                                 pkg=pkg, prefetch=True, scheduler=self._sched_iface)
10690
10691                 elif pkg.type_name == "binary" and \
10692                         "--getbinpkg" in self.myopts and \
10693                         pkg.root_config.trees["bintree"].isremote(pkg.cpv):
10694
10695                         prefetcher = BinpkgPrefetcher(background=True,
10696                                 pkg=pkg, scheduler=self._sched_iface)
10697
10698                 return prefetcher
10699
10700         def _is_restart_scheduled(self):
10701                 """
10702                 Check if the merge list contains a replacement
10703                 for the currently running instance, which will result
10704                 in a restart after the merge.
10705                 @rtype: bool
10706                 @returns: True if a restart is scheduled, False otherwise.
10707                 """
10708                 if self._opts_no_restart.intersection(self.myopts):
10709                         return False
10710
10711                 mergelist = self._mergelist
10712
10713                 for i, pkg in enumerate(mergelist):
10714                         if self._is_restart_necessary(pkg) and \
10715                                 i != len(mergelist) - 1:
10716                                 return True
10717
10718                 return False
10719
10720         def _is_restart_necessary(self, pkg):
10721                 """
10722                 @return: True if merging the given package
10723                         requires a restart, False otherwise.
10724                 """
10725
10726                 # Figure out if we need a restart.
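                      # A restart is needed only when the package being merged is portage
                      # itself (PORTAGE_PACKAGE_ATOM), it targets the root that the
                      # running instance was loaded from, and it differs from (or we could
                      # not identify) the currently running portage version.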
10727                 if pkg.root == self._running_root.root and \
10728                         portage.match_from_list(
10729                         portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
10730                         if self._running_portage:
10731                                 return pkg.cpv != self._running_portage.cpv
10732                         return True
10733                 return False
10734
10735         def _restart_if_necessary(self, pkg):
10736                 """
10737                 Use execv() to restart emerge. This happens
10738                 if portage upgrades itself and there are
10739                 remaining packages in the list.
10740                 """
10741
10742                 if self._opts_no_restart.intersection(self.myopts):
10743                         return
10744
10745                 if not self._is_restart_necessary(pkg):
10746                         return
10747
10748                 if pkg == self._mergelist[-1]:
10749                         return
10750
10751                 self._main_loop_cleanup()
10752
10753                 logger = self._logger
10754                 pkg_count = self._pkg_count
10755                 mtimedb = self._mtimedb
10756                 bad_resume_opts = self._bad_resume_opts
10757
10758                 logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
10759                         (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
10760
10761                 logger.log(" *** RESTARTING " + \
10762                         "emerge via exec() after change of " + \
10763                         "portage version.")
10764
10765                 mtimedb["resume"]["mergelist"].remove(list(pkg))
10766                 mtimedb.commit()
10767                 portage.run_exitfuncs()
10768                 mynewargv = [sys.argv[0], "--resume"]
10769                 resume_opts = self.myopts.copy()
10770                 # For automatic resume, we need to prevent
10771                 # any of bad_resume_opts from leaking in
10772                 # via EMERGE_DEFAULT_OPTS.
10773                 resume_opts["--ignore-default-opts"] = True
10774                 for myopt, myarg in resume_opts.iteritems():
10775                         if myopt not in bad_resume_opts:
10776                                 if myarg is True:
10777                                         mynewargv.append(myopt)
10778                                 else:
10779                                         mynewargv.append(myopt +"="+ str(myarg))
10780                 # priority only needs to be adjusted on the first run
10781                 os.environ["PORTAGE_NICENESS"] = "0"
10782                 os.execv(mynewargv[0], mynewargv)
10783
10784         def merge(self):
10785
10786                 if "--resume" in self.myopts:
10787                         # We're resuming.
10788                         portage.writemsg_stdout(
10789                                 colorize("GOOD", "*** Resuming merge...\n"), noiselevel=-1)
10790                         self._logger.log(" *** Resuming merge...")
10791
10792                 self._save_resume_list()
10793
10794                 try:
10795                         self._background = self._background_mode()
10796                 except self._unknown_internal_error:
10797                         return 1
10798
10799                 for root in self.trees:
10800                         root_config = self.trees[root]["root_config"]
10801
10802                         # Even for --pretend --fetch mode, PORTAGE_TMPDIR is required
10803                         # since it might spawn pkg_nofetch which requires PORTAGE_BUILDDIR
10804                         # for ensuring sane $PWD (bug #239560) and storing elog messages.
10805                         tmpdir = root_config.settings.get("PORTAGE_TMPDIR", "")
10806                         if not tmpdir or not os.path.isdir(tmpdir):
10807                                 msg = "The directory specified in your " + \
10808                                         "PORTAGE_TMPDIR variable, '%s', " % tmpdir + \
10809                                         "does not exist. Please create this " + \
10810                                         "directory or correct your PORTAGE_TMPDIR setting."
10811                                 msg = textwrap.wrap(msg, 70)
10812                                 out = portage.output.EOutput()
10813                                 for l in msg:
10814                                         out.eerror(l)
10815                                 return 1
10816
10817                         if self._background:
10818                                 root_config.settings.unlock()
10819                                 root_config.settings["PORTAGE_BACKGROUND"] = "1"
10820                                 root_config.settings.backup_changes("PORTAGE_BACKGROUND")
10821                                 root_config.settings.lock()
10822
10823                         self.pkgsettings[root] = portage.config(
10824                                 clone=root_config.settings)
10825
10826                 rval = self._generate_digests()
10827                 if rval != os.EX_OK:
10828                         return rval
10829
10830                 rval = self._check_manifests()
10831                 if rval != os.EX_OK:
10832                         return rval
10833
10834                 keep_going = "--keep-going" in self.myopts
10835                 fetchonly = self._build_opts.fetchonly
10836                 mtimedb = self._mtimedb
10837                 failed_pkgs = self._failed_pkgs
10838
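                      # With --keep-going, each iteration drops the packages that failed
                      # from the resume mergelist, recalculates the list and tries again;
                      # otherwise (or with --fetchonly) a single pass is made.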
10839                 while True:
10840                         rval = self._merge()
10841                         if rval == os.EX_OK or fetchonly or not keep_going:
10842                                 break
10843                         if "resume" not in mtimedb:
10844                                 break
10845                         mergelist = self._mtimedb["resume"].get("mergelist")
10846                         if not mergelist:
10847                                 break
10848
10849                         if not failed_pkgs:
10850                                 break
10851
10852                         for failed_pkg in failed_pkgs:
10853                                 mergelist.remove(list(failed_pkg.pkg))
10854
10855                         self._failed_pkgs_all.extend(failed_pkgs)
10856                         del failed_pkgs[:]
10857
10858                         if not mergelist:
10859                                 break
10860
10861                         if not self._calc_resume_list():
10862                                 break
10863
10864                         clear_caches(self.trees)
10865                         if not self._mergelist:
10866                                 break
10867
10868                         self._save_resume_list()
10869                         self._pkg_count.curval = 0
10870                         self._pkg_count.maxval = len([x for x in self._mergelist \
10871                                 if isinstance(x, Package) and x.operation == "merge"])
10872                         self._status_display.maxval = self._pkg_count.maxval
10873
10874                 self._logger.log(" *** Finished. Cleaning up...")
10875
10876                 if failed_pkgs:
10877                         self._failed_pkgs_all.extend(failed_pkgs)
10878                         del failed_pkgs[:]
10879
10880                 background = self._background
10881                 failure_log_shown = False
10882                 if background and len(self._failed_pkgs_all) == 1:
10883                         # If only one package failed then just show its
10884                         # whole log for easy viewing.
10885                         failed_pkg = self._failed_pkgs_all[-1]
10886                         build_dir = failed_pkg.build_dir
10887                         log_file = None
10888
10889                         log_paths = [failed_pkg.build_log]
10890
10891                         log_path = self._locate_failure_log(failed_pkg)
10892                         if log_path is not None:
10893                                 try:
10894                                         log_file = open(log_path)
10895                                 except IOError:
10896                                         pass
10897
10898                         if log_file is not None:
10899                                 try:
10900                                         for line in log_file:
10901                                                 writemsg_level(line, noiselevel=-1)
10902                                 finally:
10903                                         log_file.close()
10904                                 failure_log_shown = True
10905
10906                 # Dump mod_echo output now since it tends to flood the terminal.
10907                 # This prevents more important output, generated later, from being
10908                 # swept away by the mod_echo output.
10909                 mod_echo_output = _flush_elog_mod_echo()
10910
10911                 if background and not failure_log_shown and \
10912                         self._failed_pkgs_all and \
10913                         self._failed_pkgs_die_msgs and \
10914                         not mod_echo_output:
10915
10916                         printer = portage.output.EOutput()
10917                         for mysettings, key, logentries in self._failed_pkgs_die_msgs:
10918                                 root_msg = ""
10919                                 if mysettings["ROOT"] != "/":
10920                                         root_msg = " merged to %s" % mysettings["ROOT"]
10921                                 print
10922                                 printer.einfo("Error messages for package %s%s:" % \
10923                                         (colorize("INFORM", key), root_msg))
10924                                 print
10925                                 for phase in portage.const.EBUILD_PHASES:
10926                                         if phase not in logentries:
10927                                                 continue
10928                                         for msgtype, msgcontent in logentries[phase]:
10929                                                 if isinstance(msgcontent, basestring):
10930                                                         msgcontent = [msgcontent]
10931                                                 for line in msgcontent:
10932                                                         printer.eerror(line.strip("\n"))
10933
10934                 if self._post_mod_echo_msgs:
10935                         for msg in self._post_mod_echo_msgs:
10936                                 msg()
10937
10938                 if len(self._failed_pkgs_all) > 1 or \
10939                         (self._failed_pkgs_all and "--keep-going" in self.myopts):
10940                         if len(self._failed_pkgs_all) > 1:
10941                                 msg = "The following %d packages have " % \
10942                                         len(self._failed_pkgs_all) + \
10943                                         "failed to build or install:"
10944                         else:
10945                                 msg = "The following package has " + \
10946                                         "failed to build or install:"
10947                         prefix = bad(" * ")
10948                         writemsg(prefix + "\n", noiselevel=-1)
10949                         from textwrap import wrap
10950                         for line in wrap(msg, 72):
10951                                 writemsg("%s%s\n" % (prefix, line), noiselevel=-1)
10952                         writemsg(prefix + "\n", noiselevel=-1)
10953                         for failed_pkg in self._failed_pkgs_all:
10954                                 writemsg("%s\t%s\n" % (prefix,
10955                                         colorize("INFORM", str(failed_pkg.pkg))),
10956                                         noiselevel=-1)
10957                         writemsg(prefix + "\n", noiselevel=-1)
10958
10959                 return rval
10960
10961         def _elog_listener(self, mysettings, key, logentries, fulltext):
10962                 errors = portage.elog.filter_loglevels(logentries, ["ERROR"])
10963                 if errors:
10964                         self._failed_pkgs_die_msgs.append(
10965                                 (mysettings, key, errors))
10966
10967         def _locate_failure_log(self, failed_pkg):
10968
10969                 build_dir = failed_pkg.build_dir
10970                 log_file = None
10971
10972                 log_paths = [failed_pkg.build_log]
10973
10974                 for log_path in log_paths:
10975                         if not log_path:
10976                                 continue
10977
10978                         try:
10979                                 log_size = os.stat(log_path).st_size
10980                         except OSError:
10981                                 continue
10982
10983                         if log_size == 0:
10984                                 continue
10985
10986                         return log_path
10987
10988                 return None
10989
10990         def _add_packages(self):
10991                 pkg_queue = self._pkg_queue
10992                 for pkg in self._mergelist:
10993                         if isinstance(pkg, Package):
10994                                 pkg_queue.append(pkg)
10995                         elif isinstance(pkg, Blocker):
10996                                 pass
10997
10998         def _system_merge_started(self, merge):
10999                 """
11000                 Add any unsatisfied runtime deps to self._unsatisfied_system_deps.
11001                 """
11002                 graph = self._digraph
11003                 if graph is None:
11004                         return
11005                 pkg = merge.merge.pkg
11006
11007                 # Skip this if $ROOT != / since it shouldn't matter if there
11008                 # are unsatisfied system runtime deps in this case.
11009                 if pkg.root != '/':
11010                         return
11011
11012                 completed_tasks = self._completed_tasks
11013                 unsatisfied = self._unsatisfied_system_deps
11014
11015                 def ignore_non_runtime_or_satisfied(priority):
11016                         """
11017                         Ignore non-runtime and satisfied runtime priorities.
11018                         """
11019                         if isinstance(priority, DepPriority) and \
11020                                 not priority.satisfied and \
11021                                 (priority.runtime or priority.runtime_post):
11022                                 return False
11023                         return True
11024
11025                 # When checking for unsatisfied runtime deps, only check
11026                 # direct deps since indirect deps are checked when the
11027                 # corresponding parent is merged.
11028                 for child in graph.child_nodes(pkg,
11029                         ignore_priority=ignore_non_runtime_or_satisfied):
11030                         if not isinstance(child, Package) or \
11031                                 child.operation == 'uninstall':
11032                                 continue
11033                         if child is pkg:
11034                                 continue
11035                         if child.operation == 'merge' and \
11036                                 child not in completed_tasks:
11037                                 unsatisfied.add(child)
11038
11039         def _merge_wait_exit_handler(self, task):
11040                 self._merge_wait_scheduled.remove(task)
11041                 self._merge_exit(task)
11042
11043         def _merge_exit(self, merge):
11044                 self._do_merge_exit(merge)
11045                 self._deallocate_config(merge.merge.settings)
11046                 if merge.returncode == os.EX_OK and \
11047                         not merge.merge.pkg.installed:
11048                         self._status_display.curval += 1
11049                 self._status_display.merges = len(self._task_queues.merge)
11050                 self._schedule()
11051
11052         def _do_merge_exit(self, merge):
11053                 pkg = merge.merge.pkg
11054                 if merge.returncode != os.EX_OK:
11055                         settings = merge.merge.settings
11056                         build_dir = settings.get("PORTAGE_BUILDDIR")
11057                         build_log = settings.get("PORTAGE_LOG_FILE")
11058
11059                         self._failed_pkgs.append(self._failed_pkg(
11060                                 build_dir=build_dir, build_log=build_log,
11061                                 pkg=pkg,
11062                                 returncode=merge.returncode))
11063                         self._failed_pkg_msg(self._failed_pkgs[-1], "install", "to")
11064
11065                         self._status_display.failed = len(self._failed_pkgs)
11066                         return
11067
11068                 self._task_complete(pkg)
11069                 pkg_to_replace = merge.merge.pkg_to_replace
11070                 if pkg_to_replace is not None:
11071                         # When a package is replaced, mark its uninstall
11072                         # task complete (if any).
11073                         uninst_hash_key = \
11074                                 ("installed", pkg.root, pkg_to_replace.cpv, "uninstall")
11075                         self._task_complete(uninst_hash_key)
11076
11077                 if pkg.installed:
11078                         return
11079
11080                 self._restart_if_necessary(pkg)
11081
11082                 # Call mtimedb.commit() after each merge so that
11083                 # --resume still works after being interrupted
11084                 # by reboot, sigkill or similar.
11085                 mtimedb = self._mtimedb
11086                 mtimedb["resume"]["mergelist"].remove(list(pkg))
11087                 if not mtimedb["resume"]["mergelist"]:
11088                         del mtimedb["resume"]
11089                 mtimedb.commit()
11090
11091         def _build_exit(self, build):
11092                 if build.returncode == os.EX_OK:
11093                         self.curval += 1
11094                         merge = PackageMerge(merge=build)
11095                         if not build.build_opts.buildpkgonly and \
11096                                 build.pkg in self._deep_system_deps:
11097                                 # Since dependencies on system packages are frequently
11098                                 # unspecified, merge them only when no builds are executing.
11099                                 self._merge_wait_queue.append(merge)
11100                                 merge.addStartListener(self._system_merge_started)
11101                         else:
11102                                 merge.addExitListener(self._merge_exit)
11103                                 self._task_queues.merge.add(merge)
11104                                 self._status_display.merges = len(self._task_queues.merge)
11105                 else:
11106                         settings = build.settings
11107                         build_dir = settings.get("PORTAGE_BUILDDIR")
11108                         build_log = settings.get("PORTAGE_LOG_FILE")
11109
11110                         self._failed_pkgs.append(self._failed_pkg(
11111                                 build_dir=build_dir, build_log=build_log,
11112                                 pkg=build.pkg,
11113                                 returncode=build.returncode))
11114                         self._failed_pkg_msg(self._failed_pkgs[-1], "emerge", "for")
11115
11116                         self._status_display.failed = len(self._failed_pkgs)
11117                         self._deallocate_config(build.settings)
11118                 self._jobs -= 1
11119                 self._status_display.running = self._jobs
11120                 self._schedule()
11121
11122         def _extract_exit(self, build):
11123                 self._build_exit(build)
11124
11125         def _task_complete(self, pkg):
11126                 self._completed_tasks.add(pkg)
11127                 self._unsatisfied_system_deps.discard(pkg)
11128                 self._choose_pkg_return_early = False
11129
11130         def _merge(self):
11131
11132                 self._add_prefetchers()
11133                 self._add_packages()
11134                 pkg_queue = self._pkg_queue
11135                 failed_pkgs = self._failed_pkgs
11136                 portage.locks._quiet = self._background
11137                 portage.elog._emerge_elog_listener = self._elog_listener
11138                 rval = os.EX_OK
11139
11140                 try:
11141                         self._main_loop()
11142                 finally:
11143                         self._main_loop_cleanup()
11144                         portage.locks._quiet = False
11145                         portage.elog._emerge_elog_listener = None
11146                         if failed_pkgs:
11147                                 rval = failed_pkgs[-1].returncode
11148
11149                 return rval
11150
11151         def _main_loop_cleanup(self):
11152                 del self._pkg_queue[:]
11153                 self._completed_tasks.clear()
11154                 self._deep_system_deps.clear()
11155                 self._unsatisfied_system_deps.clear()
11156                 self._choose_pkg_return_early = False
11157                 self._status_display.reset()
11158                 self._digraph = None
11159                 self._task_queues.fetch.clear()
11160
11161         def _choose_pkg(self):
11162                 """
11163                 Choose a task that has all of its dependencies satisfied.
11164                 """
11165
11166                 if self._choose_pkg_return_early:
11167                         return None
11168
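                      # Without a digraph the dependencies of queued packages are unknown,
                      # so while jobs are running a new package is only started if
                      # --nodeps was requested together with multiple jobs.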
11169                 if self._digraph is None:
11170                         if (self._jobs or self._task_queues.merge) and \
11171                                 not ("--nodeps" in self.myopts and \
11172                                 (self._max_jobs is True or self._max_jobs > 1)):
11173                                 self._choose_pkg_return_early = True
11174                                 return None
11175                         return self._pkg_queue.pop(0)
11176
11177                 if not (self._jobs or self._task_queues.merge):
11178                         return self._pkg_queue.pop(0)
11179
11180                 self._prune_digraph()
11181
11182                 chosen_pkg = None
11183                 later = set(self._pkg_queue)
11184                 for pkg in self._pkg_queue:
11185                         later.remove(pkg)
11186                         if not self._dependent_on_scheduled_merges(pkg, later):
11187                                 chosen_pkg = pkg
11188                                 break
11189
11190                 if chosen_pkg is not None:
11191                         self._pkg_queue.remove(chosen_pkg)
11192
11193                 if chosen_pkg is None:
11194                         # There's no point in searching for a package to
11195                         # choose until at least one of the existing jobs
11196                         # completes.
11197                         self._choose_pkg_return_early = True
11198
11199                 return chosen_pkg
11200
11201         def _dependent_on_scheduled_merges(self, pkg, later):
11202                 """
11203                 Traverse the subgraph of the given package's deep dependencies
11204                 to see if it contains any scheduled merges.
11205                 @param pkg: a package to check dependencies for
11206                 @type pkg: Package
11207                 @param later: packages for which dependence should be ignored
11208                         since they will be merged later than pkg anyway, so
11209                         delaying the merge of pkg will not result in a more optimal
11210                         merge order
11211                 @type later: set
11212                 @rtype: bool
11213                 @returns: True if the package is dependent, False otherwise.
11214                 """
11215
11216                 graph = self._digraph
11217                 completed_tasks = self._completed_tasks
11218
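                      # Depth-first walk over pkg's children: hitting any node that is not
                      # an installed nomerge node, an uninstall outside the direct deps,
                      # an already completed task, or a later-scheduled package marks pkg
                      # as dependent.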
11219                 dependent = False
11220                 traversed_nodes = set([pkg])
11221                 direct_deps = graph.child_nodes(pkg)
11222                 node_stack = direct_deps
11223                 direct_deps = frozenset(direct_deps)
11224                 while node_stack:
11225                         node = node_stack.pop()
11226                         if node in traversed_nodes:
11227                                 continue
11228                         traversed_nodes.add(node)
11229                         if not ((node.installed and node.operation == "nomerge") or \
11230                                 (node.operation == "uninstall" and \
11231                                 node not in direct_deps) or \
11232                                 node in completed_tasks or \
11233                                 node in later):
11234                                 dependent = True
11235                                 break
11236                         node_stack.extend(graph.child_nodes(node))
11237
11238                 return dependent
11239
11240         def _allocate_config(self, root):
11241                 """
11242                 Allocate a unique config instance for a task in order
11243                 to prevent interference between parallel tasks.
11244                 """
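                      # Reuse a pooled instance when one is available, presumably to avoid
                      # the cost of cloning self.pkgsettings[root] for every task.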
11245                 if self._config_pool[root]:
11246                         temp_settings = self._config_pool[root].pop()
11247                 else:
11248                         temp_settings = portage.config(clone=self.pkgsettings[root])
11249                 # Since config.setcpv() isn't guaranteed to call config.reset() (for
11250                 # performance reasons), call it here to make sure all settings from the
11251                 # previous package get flushed out (such as PORTAGE_LOG_FILE).
11252                 temp_settings.reload()
11253                 temp_settings.reset()
11254                 return temp_settings
11255
11256         def _deallocate_config(self, settings):
11257                 self._config_pool[settings["ROOT"]].append(settings)
11258
11259         def _main_loop(self):
11260
11261                 # Only allow 1 job max if a restart is scheduled
11262                 # due to portage update.
11263                 if self._is_restart_scheduled() or \
11264                         self._opts_no_background.intersection(self.myopts):
11265                         self._set_max_jobs(1)
11266
11267                 merge_queue = self._task_queues.merge
11268
11269                 while self._schedule():
11270                         if self._poll_event_handlers:
11271                                 self._poll_loop()
11272
11273                 while True:
11274                         self._schedule()
11275                         if not (self._jobs or merge_queue):
11276                                 break
11277                         if self._poll_event_handlers:
11278                                 self._poll_loop()
11279
11280         def _keep_scheduling(self):
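                      # Scheduling continues while packages remain queued, unless a
                      # failure has occurred and this is not a fetchonly run.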
11281                 return bool(self._pkg_queue and \
11282                         not (self._failed_pkgs and not self._build_opts.fetchonly))
11283
11284         def _schedule_tasks(self):
11285
11286                 # When the number of jobs drops to zero, process all waiting merges.
11287                 if not self._jobs and self._merge_wait_queue:
11288                         for task in self._merge_wait_queue:
11289                                 task.addExitListener(self._merge_wait_exit_handler)
11290                                 self._task_queues.merge.add(task)
11291                         self._status_display.merges = len(self._task_queues.merge)
11292                         self._merge_wait_scheduled.extend(self._merge_wait_queue)
11293                         del self._merge_wait_queue[:]
11294
11295                 self._schedule_tasks_imp()
11296                 self._status_display.display()
11297
11298                 state_change = 0
11299                 for q in self._task_queues.values():
11300                         if q.schedule():
11301                                 state_change += 1
11302
11303                 # Cancel prefetchers if they're the only reason
11304                 # the main poll loop is still running.
11305                 if self._failed_pkgs and not self._build_opts.fetchonly and \
11306                         not (self._jobs or self._task_queues.merge) and \
11307                         self._task_queues.fetch:
11308                         self._task_queues.fetch.clear()
11309                         state_change += 1
11310
11311                 if state_change:
11312                         self._schedule_tasks_imp()
11313                         self._status_display.display()
11314
11315                 return self._keep_scheduling()
11316
11317         def _job_delay(self):
11318                 """
11319                 @rtype: bool
11320                 @returns: True if job scheduling should be delayed, False otherwise.
11321                 """
11322
11323                 if self._jobs and self._max_load is not None:
11324
11325                         current_time = time.time()
11326
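                              # Throttle job starts when a load limit is in effect: the required
                              # gap between starts grows as _job_delay_factor * jobs ** _job_delay_exp,
                              # capped at _job_delay_max. Purely illustrative numbers: with a factor
                              # of 0.1 and an exponent of 1.5, four running jobs need a ~0.8s gap.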
11327                         delay = self._job_delay_factor * self._jobs ** self._job_delay_exp
11328                         if delay > self._job_delay_max:
11329                                 delay = self._job_delay_max
11330                         if (current_time - self._previous_job_start_time) < delay:
11331                                 return True
11332
11333                 return False
11334
11335         def _schedule_tasks_imp(self):
11336                 """
11337                 @rtype: bool
11338                 @returns: True if state changed, False otherwise.
11339                 """
11340
11341                 state_change = 0
11342
11343                 while True:
11344
11345                         if not self._keep_scheduling():
11346                                 return bool(state_change)
11347
11348                         if self._choose_pkg_return_early or \
11349                                 self._merge_wait_scheduled or \
11350                                 (self._jobs and self._unsatisfied_system_deps) or \
11351                                 not self._can_add_job() or \
11352                                 self._job_delay():
11353                                 return bool(state_change)
11354
11355                         pkg = self._choose_pkg()
11356                         if pkg is None:
11357                                 return bool(state_change)
11358
11359                         state_change += 1
11360
11361                         if not pkg.installed:
11362                                 self._pkg_count.curval += 1
11363
11364                         task = self._task(pkg)
11365
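                              # Dispatch the task: installed packages (uninstall operations) go
                              # straight to the merge queue, built packages become jobs handled by
                              # _extract_exit, and everything else becomes a build job.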
11366                         if pkg.installed:
11367                                 merge = PackageMerge(merge=task)
11368                                 merge.addExitListener(self._merge_exit)
11369                                 self._task_queues.merge.add(merge)
11370
11371                         elif pkg.built:
11372                                 self._jobs += 1
11373                                 self._previous_job_start_time = time.time()
11374                                 self._status_display.running = self._jobs
11375                                 task.addExitListener(self._extract_exit)
11376                                 self._task_queues.jobs.add(task)
11377
11378                         else:
11379                                 self._jobs += 1
11380                                 self._previous_job_start_time = time.time()
11381                                 self._status_display.running = self._jobs
11382                                 task.addExitListener(self._build_exit)
11383                                 self._task_queues.jobs.add(task)
11384
11385                 return bool(state_change)
11386
11387         def _task(self, pkg):
11388
11389                 pkg_to_replace = None
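                      # For anything other than an uninstall, look up the installed package
                      # occupying the same slot (if any) so it can be passed along as the
                      # package being replaced.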
11390                 if pkg.operation != "uninstall":
11391                         vardb = pkg.root_config.trees["vartree"].dbapi
11392                         previous_cpv = vardb.match(pkg.slot_atom)
11393                         if previous_cpv:
11394                                 previous_cpv = previous_cpv.pop()
11395                                 pkg_to_replace = self._pkg(previous_cpv,
11396                                         "installed", pkg.root_config, installed=True)
11397
11398                 task = MergeListItem(args_set=self._args_set,
11399                         background=self._background, binpkg_opts=self._binpkg_opts,
11400                         build_opts=self._build_opts,
11401                         config_pool=self._ConfigPool(pkg.root,
11402                         self._allocate_config, self._deallocate_config),
11403                         emerge_opts=self.myopts,
11404                         find_blockers=self._find_blockers(pkg), logger=self._logger,
11405                         mtimedb=self._mtimedb, pkg=pkg, pkg_count=self._pkg_count.copy(),
11406                         pkg_to_replace=pkg_to_replace,
11407                         prefetcher=self._prefetchers.get(pkg),
11408                         scheduler=self._sched_iface,
11409                         settings=self._allocate_config(pkg.root),
11410                         statusMessage=self._status_msg,
11411                         world_atom=self._world_atom)
11412
11413                 return task
11414
11415         def _failed_pkg_msg(self, failed_pkg, action, preposition):
11416                 pkg = failed_pkg.pkg
11417                 msg = "%s to %s %s" % \
11418                         (bad("Failed"), action, colorize("INFORM", pkg.cpv))
11419                 if pkg.root != "/":
11420                         msg += " %s %s" % (preposition, pkg.root)
11421
11422                 log_path = self._locate_failure_log(failed_pkg)
11423                 if log_path is not None:
11424                         msg += ", Log file:"
11425                 self._status_msg(msg)
11426
11427                 if log_path is not None:
11428                         self._status_msg(" '%s'" % (colorize("INFORM", log_path),))
11429
11430         def _status_msg(self, msg):
11431                 """
11432                 Display a brief status message (no newlines) in the status display.
11433                 This is called by tasks to provide feedback to the user. It
11434                 delegates the responsibility of generating \r and \n control
11435                 characters to the status display, which guarantees that lines are
11436                 created or erased when necessary and appropriate.
11437
11438                 @type msg: str
11439                 @param msg: a brief status message (no newlines allowed)
11440                 """
11441                 if not self._background:
11442                         writemsg_level("\n")
11443                 self._status_display.displayMessage(msg)
11444
11445         def _save_resume_list(self):
11446                 """
11447                 Do this before verifying the ebuild Manifests, since it might
11448                 be possible for the user to use --resume --skipfirst to get past
11449                 a non-essential package with a broken digest.
11450                 """
11451                 mtimedb = self._mtimedb
11452                 mtimedb["resume"]["mergelist"] = [list(x) \
11453                         for x in self._mergelist \
11454                         if isinstance(x, Package) and x.operation == "merge"]
11455
11456                 mtimedb.commit()
11457
11458         def _calc_resume_list(self):
11459                 """
11460                 Use the current resume list to calculate a new one,
11461                 dropping any packages with unsatisfied deps.
11462                 @rtype: bool
11463                 @returns: True if successful, False otherwise.
11464                 """
11465                 print colorize("GOOD", "*** Resuming merge...")
11466
11467                 if self._show_list():
11468                         if "--tree" in self.myopts:
11469                                 portage.writemsg_stdout("\n" + \
11470                                         darkgreen("These are the packages that " + \
11471                                         "would be merged, in reverse order:\n\n"))
11472
11473                         else:
11474                                 portage.writemsg_stdout("\n" + \
11475                                         darkgreen("These are the packages that " + \
11476                                         "would be merged, in order:\n\n"))
11477
11478                 show_spinner = "--quiet" not in self.myopts and \
11479                         "--nodeps" not in self.myopts
11480
11481                 if show_spinner:
11482                         print "Calculating dependencies  ",
11483
11484                 myparams = create_depgraph_params(self.myopts, None)
11485                 success = False
11486                 e = None
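                      # Rebuild the dependency graph from the saved resume list.
                      # UnsatisfiedResumeDep means one or more entries in the list can no
                      # longer be satisfied.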
11487                 try:
11488                         success, mydepgraph, dropped_tasks = resume_depgraph(
11489                                 self.settings, self.trees, self._mtimedb, self.myopts,
11490                                 myparams, self._spinner)
11491                 except depgraph.UnsatisfiedResumeDep, exc:
11492                         # rename variable to avoid python-3.0 error:
11493                         # SyntaxError: can not delete variable 'e' referenced in nested
11494                         #              scope
11495                         e = exc
11496                         mydepgraph = e.depgraph
11497                         dropped_tasks = set()
11498
11499                 if show_spinner:
11500                         print "\b\b... done!"
11501
11502                 if e is not None:
11503                         def unsatisfied_resume_dep_msg():
11504                                 mydepgraph.display_problems()
11505                                 out = portage.output.EOutput()
11506                                 out.eerror("One or more packages are either masked or " + \
11507                                         "have missing dependencies:")
11508                                 out.eerror("")
11509                                 indent = "  "
11510                                 show_parents = set()
11511                                 for dep in e.value:
11512                                         if dep.parent in show_parents:
11513                                                 continue
11514                                         show_parents.add(dep.parent)
11515                                         if dep.atom is None:
11516                                                 out.eerror(indent + "Masked package:")
11517                                                 out.eerror(2 * indent + str(dep.parent))
11518                                                 out.eerror("")
11519                                         else:
11520                                                 out.eerror(indent + str(dep.atom) + " pulled in by:")
11521                                                 out.eerror(2 * indent + str(dep.parent))
11522                                                 out.eerror("")
11523                                 msg = "The resume list contains packages " + \
11524                                         "that are either masked or have " + \
11525                                         "unsatisfied dependencies. " + \
11526                                         "Please restart/continue " + \
11527                                         "the operation manually, or use --skipfirst " + \
11528                                         "to skip the first package in the list and " + \
11529                                         "any other packages that may be " + \
11530                                         "masked or have missing dependencies."
11531                                 for line in textwrap.wrap(msg, 72):
11532                                         out.eerror(line)
11533                         self._post_mod_echo_msgs.append(unsatisfied_resume_dep_msg)
11534                         return False
11535
11536                 if success and self._show_list():
11537                         mylist = mydepgraph.altlist()
11538                         if mylist:
11539                                 if "--tree" in self.myopts:
11540                                         mylist.reverse()
11541                                 mydepgraph.display(mylist, favorites=self._favorites)
11542
11543                 if not success:
11544                         self._post_mod_echo_msgs.append(mydepgraph.display_problems)
11545                         return False
11546                 mydepgraph.display_problems()
11547
11548                 mylist = mydepgraph.altlist()
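                      # Break references that lead back into the depgraph so that holding
                      # the merge list does not keep the whole graph alive.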
11549                 mydepgraph.break_refs(mylist)
11550                 mydepgraph.break_refs(dropped_tasks)
11551                 self._mergelist = mylist
11552                 self._set_digraph(mydepgraph.schedulerGraph())
11553
11554                 msg_width = 75
11555                 for task in dropped_tasks:
11556                         if not (isinstance(task, Package) and task.operation == "merge"):
11557                                 continue
11558                         pkg = task
11559                         msg = "emerge --keep-going:" + \
11560                                 " %s" % (pkg.cpv,)
11561                         if pkg.root != "/":
11562                                 msg += " for %s" % (pkg.root,)
11563                         msg += " dropped due to unsatisfied dependency."
11564                         for line in textwrap.wrap(msg, msg_width):
11565                                 eerror(line, phase="other", key=pkg.cpv)
11566                         settings = self.pkgsettings[pkg.root]
11567                         # Ensure that log collection from $T is disabled inside
11568                         # elog_process(), since any logs that might exist are
11569                         # not valid here.
11570                         settings.pop("T", None)
11571                         portage.elog.elog_process(pkg.cpv, settings)
11572                         self._failed_pkgs_all.append(self._failed_pkg(pkg=pkg))
11573
11574                 return True
11575
11576         def _show_list(self):
11577                 myopts = self.myopts
11578                 if "--quiet" not in myopts and \
11579                         ("--ask" in myopts or "--tree" in myopts or \
11580                         "--verbose" in myopts):
11581                         return True
11582                 return False
11583
11584         def _world_atom(self, pkg):
11585                 """
11586                 Add the package to the world file, but only if
11587                 it's supposed to be added. Otherwise, do nothing.
11588                 """
11589
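                      # None of these options should result in a permanent change to the
                      # world file.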
11590                 if set(("--buildpkgonly", "--fetchonly",
11591                         "--fetch-all-uri",
11592                         "--oneshot", "--onlydeps",
11593                         "--pretend")).intersection(self.myopts):
11594                         return
11595
11596                 if pkg.root != self.target_root:
11597                         return
11598
11599                 args_set = self._args_set
11600                 if not args_set.findAtomForPackage(pkg):
11601                         return
11602
11603                 logger = self._logger
11604                 pkg_count = self._pkg_count
11605                 root_config = pkg.root_config
11606                 world_set = root_config.sets["world"]
11607                 world_locked = False
11608                 if hasattr(world_set, "lock"):
11609                         world_set.lock()
11610                         world_locked = True
11611
11612                 try:
11613                         if hasattr(world_set, "load"):
11614                                 world_set.load() # maybe it's changed on disk
11615
11616                         atom = create_world_atom(pkg, args_set, root_config)
11617                         if atom:
11618                                 if hasattr(world_set, "add"):
11619                                         self._status_msg(('Recording %s in "world" ' + \
11620                                                 'favorites file...') % atom)
11621                                         logger.log(" === (%s of %s) Updating world file (%s)" % \
11622                                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv))
11623                                         world_set.add(atom)
11624                                 else:
11625                                         writemsg_level('\n!!! Unable to record %s in "world"\n' % \
11626                                                 (atom,), level=logging.WARN, noiselevel=-1)
11627                 finally:
11628                         if world_locked:
11629                                 world_set.unlock()
11630
11631         def _pkg(self, cpv, type_name, root_config, installed=False):
11632                 """
11633                 Get a package instance from the cache, or create a new
11634                 one if necessary. Raises KeyError from aux_get if it
11635                 fails for some reason (package does not exist or is
11636                 corrupt).
11637                 """
11638                 operation = "merge"
11639                 if installed:
11640                         operation = "nomerge"
11641
11642                 if self._digraph is not None:
11643                         # Reuse existing instance when available.
11644                         pkg = self._digraph.get(
11645                                 (type_name, root_config.root, cpv, operation))
11646                         if pkg is not None:
11647                                 return pkg
11648
11649                 tree_type = depgraph.pkg_tree_map[type_name]
11650                 db = root_config.trees[tree_type].dbapi
11651                 db_keys = list(self.trees[root_config.root][
11652                         tree_type].dbapi._aux_cache_keys)
11653                 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
11654                 pkg = Package(cpv=cpv, metadata=metadata,
11655                         root_config=root_config, installed=installed)
11656                 if type_name == "ebuild":
11657                         settings = self.pkgsettings[root_config.root]
11658                         settings.setcpv(pkg)
11659                         pkg.metadata["USE"] = settings["PORTAGE_USE"]
11660                         pkg.metadata['CHOST'] = settings.get('CHOST', '')
11661
11662                 return pkg
11663
11664 class MetadataRegen(PollScheduler):
11665
11666         def __init__(self, portdb, cp_iter=None, consumer=None,
11667                 max_jobs=None, max_load=None):
11668                 PollScheduler.__init__(self)
11669                 self._portdb = portdb
11670                 self._global_cleanse = False
11671                 if cp_iter is None:
11672                         cp_iter = self._iter_every_cp()
11673                         # We can globally cleanse stale cache only if we
11674                         # iterate over every single cp.
11675                         self._global_cleanse = True
11676                 self._cp_iter = cp_iter
11677                 self._consumer = consumer
11678
11679                 if max_jobs is None:
11680                         max_jobs = 1
11681
11682                 self._max_jobs = max_jobs
11683                 self._max_load = max_load
11684                 self._sched_iface = self._sched_iface_class(
11685                         register=self._register,
11686                         schedule=self._schedule_wait,
11687                         unregister=self._unregister)
11688
11689                 self._valid_pkgs = set()
11690                 self._cp_set = set()
11691                 self._process_iter = self._iter_metadata_processes()
11692                 self.returncode = os.EX_OK
11693                 self._error_count = 0
11694
11695         def _iter_every_cp(self):
11696                 every_cp = self._portdb.cp_all()
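                      # Sort in descending order so that pop() from the end yields each cp
                      # in ascending order.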
11697                 every_cp.sort(reverse=True)
11698                 try:
11699                         while True:
11700                                 yield every_cp.pop()
11701                 except IndexError:
11702                         pass
11703
11704         def _iter_metadata_processes(self):
11705                 portdb = self._portdb
11706                 valid_pkgs = self._valid_pkgs
11707                 cp_set = self._cp_set
11708                 consumer = self._consumer
11709
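                      # For each ebuild: if a valid cache entry already exists, hand it to
                      # the consumer directly; otherwise yield a task that regenerates the
                      # metadata.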
11710                 for cp in self._cp_iter:
11711                         cp_set.add(cp)
11712                         portage.writemsg_stdout("Processing %s\n" % cp)
11713                         cpv_list = portdb.cp_list(cp)
11714                         for cpv in cpv_list:
11715                                 valid_pkgs.add(cpv)
11716                                 ebuild_path, repo_path = portdb.findname2(cpv)
11717                                 metadata, st, emtime = portdb._pull_valid_cache(
11718                                         cpv, ebuild_path, repo_path)
11719                                 if metadata is not None:
11720                                         if consumer is not None:
11721                                                 consumer(cpv, ebuild_path,
11722                                                         repo_path, metadata)
11723                                         continue
11724
11725                                 yield EbuildMetadataPhase(cpv=cpv, ebuild_path=ebuild_path,
11726                                         ebuild_mtime=emtime,
11727                                         metadata_callback=portdb._metadata_callback,
11728                                         portdb=portdb, repo_path=repo_path,
11729                                         settings=portdb.doebuild_settings)
11730
11731         def run(self):
11732
11733                 portdb = self._portdb
11734                 from portage.cache.cache_errors import CacheError
11735                 dead_nodes = {}
11736
11737                 while self._schedule():
11738                         self._poll_loop()
11739
11740                 while self._jobs:
11741                         self._poll_loop()
11742
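                      # Collect candidate stale cache keys: every key when doing a global
                      # cleanse, otherwise only keys belonging to the cp names that were
                      # actually processed.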
11743                 if self._global_cleanse:
11744                         for mytree in portdb.porttrees:
11745                                 try:
11746                                         dead_nodes[mytree] = set(portdb.auxdb[mytree].iterkeys())
11747                                 except CacheError, e:
11748                                         portage.writemsg("Error listing cache entries for " + \
11749                                                 "'%s': %s, continuing...\n" % (mytree, e),
11750                                                 noiselevel=-1)
11751                                         del e
11752                                         dead_nodes = None
11753                                         break
11754                 else:
11755                         cp_set = self._cp_set
11756                         cpv_getkey = portage.cpv_getkey
11757                         for mytree in portdb.porttrees:
11758                                 try:
11759                                         dead_nodes[mytree] = set(cpv for cpv in \
11760                                                 portdb.auxdb[mytree].iterkeys() \
11761                                                 if cpv_getkey(cpv) in cp_set)
11762                                 except CacheError, e:
11763                                         portage.writemsg("Error listing cache entries for " + \
11764                                                 "'%s': %s, continuing...\n" % (mytree, e),
11765                                                 noiselevel=-1)
11766                                         del e
11767                                         dead_nodes = None
11768                                         break
11769
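                      # Keep cache entries whose ebuilds still exist; anything left in
                      # dead_nodes is stale and is removed from the cache.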
11770                 if dead_nodes:
11771                         for y in self._valid_pkgs:
11772                                 for mytree in portdb.porttrees:
11773                                         if portdb.findname2(y, mytree=mytree)[0]:
11774                                                 dead_nodes[mytree].discard(y)
11775
11776                         for mytree, nodes in dead_nodes.iteritems():
11777                                 auxdb = portdb.auxdb[mytree]
11778                                 for y in nodes:
11779                                         try:
11780                                                 del auxdb[y]
11781                                         except (KeyError, CacheError):
11782                                                 pass
11783
11784         def _schedule_tasks(self):
11785                 """
11786                 @rtype: bool
11787                 @returns: True if there may be remaining tasks to schedule,
11788                         False otherwise.
11789                 """
11790                 while self._can_add_job():
11791                         try:
11792                                 metadata_process = self._process_iter.next()
11793                         except StopIteration:
11794                                 return False
11795
11796                         self._jobs += 1
11797                         metadata_process.scheduler = self._sched_iface
11798                         metadata_process.addExitListener(self._metadata_exit)
11799                         metadata_process.start()
11800                 return True
11801
11802         def _metadata_exit(self, metadata_process):
11803                 self._jobs -= 1
11804                 if metadata_process.returncode != os.EX_OK:
11805                         self.returncode = 1
11806                         self._error_count += 1
11807                         self._valid_pkgs.discard(metadata_process.cpv)
11808                         portage.writemsg("Error processing %s, continuing...\n" % \
11809                                 (metadata_process.cpv,), noiselevel=-1)
11810
11811                 if self._consumer is not None:
11812                         # On failure, still notify the consumer (in this case the metadata
11813                         # argument is None).
11814                         self._consumer(metadata_process.cpv,
11815                                 metadata_process.ebuild_path,
11816                                 metadata_process.repo_path,
11817                                 metadata_process.metadata)
11818
11819                 self._schedule()
11820
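      # Illustrative usage sketch for MetadataRegen (not executed here; shown only
      # to demonstrate the interface above, assuming a portdbapi instance 'portdb'):
      #
      #     regen = MetadataRegen(portdb, max_jobs=2)
      #     regen.run()
      #     if regen.returncode != os.EX_OK:
      #         ...  # one or more ebuilds failed metadata regeneration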
11821 class UninstallFailure(portage.exception.PortageException):
11822         """
11823         An instance of this class is raised by unmerge() when
11824         an uninstallation fails.
11825         """
11826         status = 1
11827         def __init__(self, *pargs):
11828                 portage.exception.PortageException.__init__(self, pargs)
11829                 if pargs:
11830                         self.status = pargs[0]
11831
11832 def unmerge(root_config, myopts, unmerge_action,
11833         unmerge_files, ldpath_mtimes, autoclean=0,
11834         clean_world=1, clean_delay=1, ordered=0, raise_on_error=0,
11835         scheduler=None, writemsg_level=portage.util.writemsg_level):
11836
11837         quiet = "--quiet" in myopts
11838         settings = root_config.settings
11839         sets = root_config.sets
11840         vartree = root_config.trees["vartree"]
11841         candidate_catpkgs=[]
11842         global_unmerge=0
11843         xterm_titles = "notitles" not in settings.features
11844         out = portage.output.EOutput()
11845         pkg_cache = {}
11846         db_keys = list(vartree.dbapi._aux_cache_keys)
11847
11848         def _pkg(cpv):
11849                 pkg = pkg_cache.get(cpv)
11850                 if pkg is None:
11851                         pkg = Package(cpv=cpv, installed=True,
11852                                 metadata=izip(db_keys, vartree.dbapi.aux_get(cpv, db_keys)),
11853                                 root_config=root_config,
11854                                 type_name="installed")
11855                         pkg_cache[cpv] = pkg
11856                 return pkg
11857
11858         vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
11859         try:
11860                 # At least the parent needs to exist for the lock file.
11861                 portage.util.ensure_dirs(vdb_path)
11862         except portage.exception.PortageException:
11863                 pass
11864         vdb_lock = None
11865         try:
11866                 if os.access(vdb_path, os.W_OK):
11867                         vdb_lock = portage.locks.lockdir(vdb_path)
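                      # Expand the system set into plain package names, resolving a virtual
                      # to its provider only when exactly one installed provider exists.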
11868                 realsyslist = sets["system"].getAtoms()
11869                 syslist = []
11870                 for x in realsyslist:
11871                         mycp = portage.dep_getkey(x)
11872                         if mycp in settings.getvirtuals():
11873                                 providers = []
11874                                 for provider in settings.getvirtuals()[mycp]:
11875                                         if vartree.dbapi.match(provider):
11876                                                 providers.append(provider)
11877                                 if len(providers) == 1:
11878                                         syslist.extend(providers)
11879                         else:
11880                                 syslist.append(mycp)
11881         
11882                 mysettings = portage.config(clone=settings)
11883         
11884                 if not unmerge_files:
11885                         if unmerge_action == "unmerge":
11886                                 print
11887                                 print bold("emerge unmerge") + " can only be used with specific package names"
11888                                 print
11889                                 return 0
11890                         else:
11891                                 global_unmerge = 1
11892         
11893                 localtree = vartree
11894                 # process all arguments and add all
11895                 # valid db entries to candidate_catpkgs
11896                 if global_unmerge:
11897                         if not unmerge_files:
11898                                 candidate_catpkgs.extend(vartree.dbapi.cp_all())
11899                 else:
11900                         #we've got command-line arguments
11901                         if not unmerge_files:
11902                                 print "\nNo packages to unmerge have been provided.\n"
11903                                 return 0
11904                         for x in unmerge_files:
11905                                 arg_parts = x.split('/')
11906                                 if x[0] not in [".","/"] and \
11907                                         arg_parts[-1][-7:] != ".ebuild":
11908                                         #possible cat/pkg or dep; treat as such
11909                                         candidate_catpkgs.append(x)
11910                                 elif unmerge_action in ["prune","clean"]:
11911                                         print "\n!!! Prune and clean do not accept individual" + \
11912                                                 " ebuilds as arguments;\n    skipping.\n"
11913                                         continue
11914                                 else:
11915                                         # it appears that the user is specifying an installed
11916                                         # ebuild and we're in "unmerge" mode, so it's ok.
11917                                         if not os.path.exists(x):
11918                                                 print "\n!!! The path '"+x+"' doesn't exist.\n"
11919                                                 return 0
11920         
11921                                         absx   = os.path.abspath(x)
11922                                         sp_absx = absx.split("/")
11923                                         if sp_absx[-1][-7:] == ".ebuild":
11924                                                 del sp_absx[-1]
11925                                                 absx = "/".join(sp_absx)
11926         
11927                                         sp_absx_len = len(sp_absx)
11928         
11929                                         vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
11930                                         vdb_len  = len(vdb_path)
11931         
11932                                         sp_vdb     = vdb_path.split("/")
11933                                         sp_vdb_len = len(sp_vdb)
11934         
11935                                         if not os.path.exists(absx+"/CONTENTS"):
11936                                                 print "!!! Not a valid db dir: "+str(absx)
11937                                                 return 0
11938         
11939                                         if sp_absx_len <= sp_vdb_len:
11940                                                 # The path is shorter, so it can't be inside the vdb.
11941                                                 print sp_absx
11942                                                 print absx
11943                                                 print "\n!!!",x,"cannot be inside "+ \
11944                                                         vdb_path+"; aborting.\n"
11945                                                 return 0
11946         
11947                                         for idx in range(0,sp_vdb_len):
11948                                                 if idx >= sp_absx_len or sp_vdb[idx] != sp_absx[idx]:
11949                                                         print sp_absx
11950                                                         print absx
11951                                                         print "\n!!!", x, "is not inside "+\
11952                                                                 vdb_path+"; aborting.\n"
11953                                                         return 0
11954         
11955                                         print "="+"/".join(sp_absx[sp_vdb_len:])
11956                                         candidate_catpkgs.append(
11957                                                 "="+"/".join(sp_absx[sp_vdb_len:]))
11958         
11959                 newline=""
11960                 if (not "--quiet" in myopts):
11961                         newline="\n"
11962                 if settings["ROOT"] != "/":
11963                         writemsg_level(darkgreen(newline+ \
11964                                 ">>> Using system located in ROOT tree %s\n" % \
11965                                 settings["ROOT"]))
11966
11967                 if (("--pretend" in myopts) or ("--ask" in myopts)) and \
11968                         not ("--quiet" in myopts):
11969                         writemsg_level(darkgreen(newline+\
11970                                 ">>> These are the packages that would be unmerged:\n"))
11971
11972                 # Preservation of order is required for --depclean and --prune so
11973                 # that dependencies are respected. Use all_selected to eliminate
11974                 # duplicate packages since the same package may be selected by
11975                 # multiple atoms.
11976                 pkgmap = []
11977                 all_selected = set()
11978                 for x in candidate_catpkgs:
11979                         # cycle through all our candidate deps and determine
11980                         # what will and will not get unmerged
11981                         try:
11982                                 mymatch = vartree.dbapi.match(x)
11983                         except portage.exception.AmbiguousPackageName, errpkgs:
11984                                 print "\n\n!!! The short ebuild name \"" + \
11985                                         x + "\" is ambiguous.  Please specify"
11986                                 print "!!! one of the following fully-qualified " + \
11987                                         "ebuild names instead:\n"
11988                                 for i in errpkgs[0]:
11989                                         print "    " + green(i)
11990                                 print
11991                                 sys.exit(1)
11992         
11993                         if not mymatch and x[0] not in "<>=~":
11994                                 mymatch = localtree.dep_match(x)
11995                         if not mymatch:
11996                                 portage.writemsg("\n--- Couldn't find '%s' to %s.\n" % \
11997                                         (x, unmerge_action), noiselevel=-1)
11998                                 continue
11999
12000                         pkgmap.append(
12001                                 {"protected": set(), "selected": set(), "omitted": set()})
12002                         mykey = len(pkgmap) - 1
12003                         if unmerge_action == "unmerge":
12004                                 for y in mymatch:
12005                                         if y not in all_selected:
12006                                                 pkgmap[mykey]["selected"].add(y)
12007                                                 all_selected.add(y)
12008                         elif unmerge_action == "prune":
12009                                 if len(mymatch) == 1:
12010                                         continue
12011                                 best_version = mymatch[0]
12012                                 best_slot = vartree.getslot(best_version)
12013                                 best_counter = vartree.dbapi.cpv_counter(best_version)
12014                                 for mypkg in mymatch[1:]:
12015                                         myslot = vartree.getslot(mypkg)
12016                                         mycounter = vartree.dbapi.cpv_counter(mypkg)
12017                                         if (myslot == best_slot and mycounter > best_counter) or \
12018                                                 mypkg == portage.best([mypkg, best_version]):
12019                                                 if myslot == best_slot:
12020                                                         if mycounter < best_counter:
12021                                                                 # On slot collision, keep the one with the
12022                                                                 # highest counter since it is the most
12023                                                                 # recently installed.
12024                                                                 continue
12025                                                 best_version = mypkg
12026                                                 best_slot = myslot
12027                                                 best_counter = mycounter
12028                                 pkgmap[mykey]["protected"].add(best_version)
12029                                 pkgmap[mykey]["selected"].update(mypkg for mypkg in mymatch \
12030                                         if mypkg != best_version and mypkg not in all_selected)
12031                                 all_selected.update(pkgmap[mykey]["selected"])
12032                         else:
12033                                 # unmerge_action == "clean"
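                                      # Group installed versions by SLOT: in each slot the most
                                      # recently installed version (highest counter) is protected,
                                      # versions not matched by the atom are protected as well, and
                                      # the remaining matched versions are selected for removal.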
12034                                 slotmap={}
12035                                 for mypkg in mymatch:
12036                                         if unmerge_action == "clean":
12037                                                 myslot = localtree.getslot(mypkg)
12038                                         else:
12039                                                 # since we're pruning, we don't care about slots
12040                                                 # and put all the pkgs in together
12041                                                 myslot = 0
12042                                         if myslot not in slotmap:
12043                                                 slotmap[myslot] = {}
12044                                         slotmap[myslot][localtree.dbapi.cpv_counter(mypkg)] = mypkg
12045
12046                                 for mypkg in vartree.dbapi.cp_list(
12047                                         portage.dep_getkey(mymatch[0])):
12048                                         myslot = vartree.getslot(mypkg)
12049                                         if myslot not in slotmap:
12050                                                 slotmap[myslot] = {}
12051                                         slotmap[myslot][vartree.dbapi.cpv_counter(mypkg)] = mypkg
12052
12053                                 for myslot in slotmap:
12054                                         counterkeys = slotmap[myslot].keys()
12055                                         if not counterkeys:
12056                                                 continue
12057                                         counterkeys.sort()
12058                                         pkgmap[mykey]["protected"].add(
12059                                                 slotmap[myslot][counterkeys[-1]])
12060                                         del counterkeys[-1]
12061
12062                                         for counter in counterkeys[:]:
12063                                                 mypkg = slotmap[myslot][counter]
12064                                                 if mypkg not in mymatch:
12065                                                         counterkeys.remove(counter)
12066                                                         pkgmap[mykey]["protected"].add(
12067                                                                 slotmap[myslot][counter])
12068
12069                                         #be pretty and get them in order of merge:
12070                                         for ckey in counterkeys:
12071                                                 mypkg = slotmap[myslot][ckey]
12072                                                 if mypkg not in all_selected:
12073                                                         pkgmap[mykey]["selected"].add(mypkg)
12074                                                         all_selected.add(mypkg)
12075                                         # ok, now the last-merged package
12076                                         # is protected, and the rest are selected
12077                 numselected = len(all_selected)
12078                 if global_unmerge and not numselected:
12079                         portage.writemsg_stdout("\n>>> No outdated packages were found on your system.\n")
12080                         return 0
12081         
12082                 if not numselected:
12083                         portage.writemsg_stdout(
12084                                 "\n>>> No packages selected for removal by " + \
12085                                 unmerge_action + "\n")
12086                         return 0
12087         finally:
12088                 if vdb_lock:
12089                         vartree.dbapi.flush_cache()
12090                         portage.locks.unlockdir(vdb_lock)
12091         
12092         from portage.sets.base import EditablePackageSet
12093         
12094         # generate a list of package sets that are directly or indirectly listed in "world",
12095         # as there is no persistent list of "installed" sets
12096         installed_sets = ["world"]
12097         stop = False
12098         pos = 0
12099         while not stop:
12100                 stop = True
12101                 pos = len(installed_sets)
12102                 for s in installed_sets[pos - 1:]:
12103                         if s not in sets:
12104                                 continue
12105                         candidates = [x[len(SETPREFIX):] for x in sets[s].getNonAtoms() if x.startswith(SETPREFIX)]
12106                         if candidates:
12107                                 stop = False
12108                                 installed_sets += candidates
12109         installed_sets = [x for x in installed_sets if x not in root_config.setconfig.active]
12110         del stop, pos
12111
12112         # We don't want to unmerge packages that are still listed in user-editable
12113         # package sets reachable from "world", as they would be remerged on the next
12114         # update of "world" or of the relevant package sets.
12115         unknown_sets = set()
12116         for cp in xrange(len(pkgmap)):
12117                 for cpv in pkgmap[cp]["selected"].copy():
12118                         try:
12119                                 pkg = _pkg(cpv)
12120                         except KeyError:
12121                                 # It could have been uninstalled
12122                                 # by a concurrent process.
12123                                 continue
12124
12125                         if unmerge_action != "clean" and \
12126                                 root_config.root == "/" and \
12127                                 portage.match_from_list(
12128                                 portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
12129                                 msg = ("Not unmerging package %s since there is no valid " + \
12130                                 "reason for portage to unmerge itself.") % (pkg.cpv,)
12131                                 for line in textwrap.wrap(msg, 75):
12132                                         out.eerror(line)
12133                                 # adjust pkgmap so the display output is correct
12134                                 pkgmap[cp]["selected"].remove(cpv)
12135                                 all_selected.remove(cpv)
12136                                 pkgmap[cp]["protected"].add(cpv)
12137                                 continue
12138
12139                         parents = []
12140                         for s in installed_sets:
12141                                 # skip sets that the user requested to unmerge, and skip world 
12142                                 # unless we're unmerging a package set (as the package would be 
12143                                 # removed from "world" later on)
12144                                 if s in root_config.setconfig.active or (s == "world" and not root_config.setconfig.active):
12145                                         continue
12146
12147                                 if s not in sets:
12148                                         if s in unknown_sets:
12149                                                 continue
12150                                         unknown_sets.add(s)
12151                                         out = portage.output.EOutput()
12152                                         out.eerror(("Unknown set '@%s' in " + \
12153                                                 "%svar/lib/portage/world_sets") % \
12154                                                 (s, root_config.root))
12155                                         continue
12156
12157                                 # only check instances of EditablePackageSet as other classes are generally used for
12158                                 # special purposes and can be ignored here (and are usually generated dynamically, so the
12159                                 # user can't do much about them anyway)
12160                                 if isinstance(sets[s], EditablePackageSet):
12161
12162                                         # This is derived from a snippet of code in the
12163                                         # depgraph._iter_atoms_for_pkg() method.
12164                                         for atom in sets[s].iterAtomsForPackage(pkg):
12165                                                 inst_matches = vartree.dbapi.match(atom)
12166                                                 inst_matches.reverse() # descending order
12167                                                 higher_slot = None
12168                                                 for inst_cpv in inst_matches:
12169                                                         try:
12170                                                                 inst_pkg = _pkg(inst_cpv)
12171                                                         except KeyError:
12172                                                                 # It could have been uninstalled
12173                                                                 # by a concurrent process.
12174                                                                 continue
12175
12176                                                         if inst_pkg.cp != atom.cp:
12177                                                                 continue
12178                                                         if pkg >= inst_pkg:
12179                                                                 # This is descending order, and we're not
12180                                                                 # interested in any versions <= pkg given.
12181                                                                 break
12182                                                         if pkg.slot_atom != inst_pkg.slot_atom:
12183                                                                 higher_slot = inst_pkg
12184                                                                 break
12185                                                 if higher_slot is None:
12186                                                         parents.append(s)
12187                                                         break
12188                         if parents:
12189                                 #print colorize("WARN", "Package %s is going to be unmerged," % cpv)
12190                                 #print colorize("WARN", "but still listed in the following package sets:")
12191                                 #print "    %s\n" % ", ".join(parents)
12192                                 print colorize("WARN", "Not unmerging package %s as it is" % cpv)
12193                                 print colorize("WARN", "still referenced by the following package sets:")
12194                                 print "    %s\n" % ", ".join(parents)
12195                                 # adjust pkgmap so the display output is correct
12196                                 pkgmap[cp]["selected"].remove(cpv)
12197                                 all_selected.remove(cpv)
12198                                 pkgmap[cp]["protected"].add(cpv)
12199         
12200         del installed_sets
12201
12202         numselected = len(all_selected)
12203         if not numselected:
12204                 writemsg_level(
12205                         "\n>>> No packages selected for removal by " + \
12206                         unmerge_action + "\n")
12207                 return 0
12208
12209         # Unmerge order only matters in some cases
12210         if not ordered:
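                      # Collapse the per-atom entries into a single entry per cp and sort
                      # them by name so the preview output is grouped and stable.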
12211                 unordered = {}
12212                 for d in pkgmap:
12213                         selected = d["selected"]
12214                         if not selected:
12215                                 continue
12216                         cp = portage.cpv_getkey(iter(selected).next())
12217                         cp_dict = unordered.get(cp)
12218                         if cp_dict is None:
12219                                 cp_dict = {}
12220                                 unordered[cp] = cp_dict
12221                                 for k in d:
12222                                         cp_dict[k] = set()
12223                         for k, v in d.iteritems():
12224                                 cp_dict[k].update(v)
12225                 pkgmap = [unordered[cp] for cp in sorted(unordered)]
12226
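              # Print the unmerge preview: for each entry show the selected, protected
              # and omitted versions, warning first if a system-profile package would be
              # removed entirely.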
12227         for x in xrange(len(pkgmap)):
12228                 selected = pkgmap[x]["selected"]
12229                 if not selected:
12230                         continue
12231                 for mytype, mylist in pkgmap[x].iteritems():
12232                         if mytype == "selected":
12233                                 continue
12234                         mylist.difference_update(all_selected)
12235                 cp = portage.cpv_getkey(iter(selected).next())
12236                 for y in localtree.dep_match(cp):
12237                         if y not in pkgmap[x]["omitted"] and \
12238                                 y not in pkgmap[x]["selected"] and \
12239                                 y not in pkgmap[x]["protected"] and \
12240                                 y not in all_selected:
12241                                 pkgmap[x]["omitted"].add(y)
12242                 if global_unmerge and not pkgmap[x]["selected"]:
12243                         #avoid cluttering the preview printout with stuff that isn't getting unmerged
12244                         continue
12245                 if not (pkgmap[x]["protected"] or pkgmap[x]["omitted"]) and cp in syslist:
12246                         writemsg_level(colorize("BAD","\a\n\n!!! " + \
12247                                 "'%s' is part of your system profile.\n" % cp),
12248                                 level=logging.WARNING, noiselevel=-1)
12249                         writemsg_level(colorize("WARN","\a!!! Unmerging it may " + \
12250                                 "be damaging to your system.\n\n"),
12251                                 level=logging.WARNING, noiselevel=-1)
12252                         if clean_delay and "--pretend" not in myopts and "--ask" not in myopts:
12253                                 countdown(int(settings["EMERGE_WARNING_DELAY"]),
12254                                         colorize("UNMERGE_WARN", "Press Ctrl-C to Stop"))
12255                 if not quiet:
12256                         writemsg_level("\n %s\n" % (bold(cp),), noiselevel=-1)
12257                 else:
12258                         writemsg_level(bold(cp) + ": ", noiselevel=-1)
12259                 for mytype in ["selected","protected","omitted"]:
12260                         if not quiet:
12261                                 writemsg_level((mytype + ": ").rjust(14), noiselevel=-1)
12262                         if pkgmap[x][mytype]:
12263                                 sorted_pkgs = [portage.catpkgsplit(mypkg)[1:] for mypkg in pkgmap[x][mytype]]
12264                                 sorted_pkgs.sort(key=cmp_sort_key(portage.pkgcmp))
12265                                 for pn, ver, rev in sorted_pkgs:
12266                                         if rev == "r0":
12267                                                 myversion = ver
12268                                         else:
12269                                                 myversion = ver + "-" + rev
12270                                         if mytype == "selected":
12271                                                 writemsg_level(
12272                                                         colorize("UNMERGE_WARN", myversion + " "),
12273                                                         noiselevel=-1)
12274                                         else:
12275                                                 writemsg_level(
12276                                                         colorize("GOOD", myversion + " "), noiselevel=-1)
12277                         else:
12278                                 writemsg_level("none ", noiselevel=-1)
12279                         if not quiet:
12280                                 writemsg_level("\n", noiselevel=-1)
12281                 if quiet:
12282                         writemsg_level("\n", noiselevel=-1)
12283
12284         writemsg_level("\n>>> " + colorize("UNMERGE_WARN", "'Selected'") + \
12285                 " packages are slated for removal.\n")
12286         writemsg_level(">>> " + colorize("GOOD", "'Protected'") + \
12287                         " and " + colorize("GOOD", "'omitted'") + \
12288                         " packages will not be removed.\n\n")
12289
12290         if "--pretend" in myopts:
12291                 #we're done... return
12292                 return 0
12293         if "--ask" in myopts:
12294                 if userquery("Would you like to unmerge these packages?")=="No":
12295                         # enter pretend mode for correct formatting of results
12296                         myopts["--pretend"] = True
12297                         print
12298                         print "Quitting."
12299                         print
12300                         return 0
12301         # The real unmerging begins after a short delay...
12302         if clean_delay and not autoclean:
12303                 countdown(int(settings["CLEAN_DELAY"]), ">>> Unmerging")
12304
12305         for x in xrange(len(pkgmap)):
12306                 for y in pkgmap[x]["selected"]:
12307                         writemsg_level(">>> Unmerging "+y+"...\n", noiselevel=-1)
12308                         emergelog(xterm_titles, "=== Unmerging... ("+y+")")
12309                         mysplit = y.split("/")
12310                         #unmerge...
12311                         retval = portage.unmerge(mysplit[0], mysplit[1], settings["ROOT"],
12312                                 mysettings, unmerge_action not in ["clean","prune"],
12313                                 vartree=vartree, ldpath_mtimes=ldpath_mtimes,
12314                                 scheduler=scheduler)
12315
12316                         if retval != os.EX_OK:
12317                                 emergelog(xterm_titles, " !!! unmerge FAILURE: "+y)
12318                                 if raise_on_error:
12319                                         raise UninstallFailure(retval)
12320                                 sys.exit(retval)
12321                         else:
12322                                 if clean_world and hasattr(sets["world"], "cleanPackage"):
12323                                         sets["world"].cleanPackage(vartree.dbapi, y)
12324                                 emergelog(xterm_titles, " >>> unmerge success: "+y)
12325         if clean_world and hasattr(sets["world"], "remove"):
12326                 for s in root_config.setconfig.active:
12327                         sets["world"].remove(SETPREFIX+s)
12328         return 1
12329
12330 def chk_updated_info_files(root, infodirs, prev_mtimes, retval):
12331
12332         if os.path.exists("/usr/bin/install-info"):
12333                 out = portage.output.EOutput()
12334                 regen_infodirs=[]
12335                 for z in infodirs:
12336                         if z=='':
12337                                 continue
12338                         inforoot=normpath(root+z)
12339                         if os.path.isdir(inforoot):
12340                                 infomtime = long(os.stat(inforoot).st_mtime)
12341                                 if inforoot not in prev_mtimes or \
12342                                         prev_mtimes[inforoot] != infomtime:
12343                                                 regen_infodirs.append(inforoot)
12344
12345                 if not regen_infodirs:
12346                         portage.writemsg_stdout("\n")
12347                         out.einfo("GNU info directory index is up-to-date.")
12348                 else:
12349                         portage.writemsg_stdout("\n")
12350                         out.einfo("Regenerating GNU info directory index...")
12351
12352                         dir_extensions = ("", ".gz", ".bz2")
12353                         icount=0
12354                         badcount=0
12355                         errmsg = ""
12356                         for inforoot in regen_infodirs:
12357                                 if inforoot=='':
12358                                         continue
12359
12360                                 if not os.path.isdir(inforoot) or \
12361                                         not os.access(inforoot, os.W_OK):
12362                                         continue
12363
12364                                 file_list = os.listdir(inforoot)
12365                                 file_list.sort()
12366                                 dir_file = os.path.join(inforoot, "dir")
12367                                 moved_old_dir = False
12368                                 processed_count = 0
12369                                 for x in file_list:
12370                                         if x.startswith(".") or \
12371                                                 os.path.isdir(os.path.join(inforoot, x)):
12372                                                 continue
12373                                         if x.startswith("dir"):
12374                                                 skip = False
12375                                                 for ext in dir_extensions:
12376                                                         if x == "dir" + ext or \
12377                                                                 x == "dir" + ext + ".old":
12378                                                                 skip = True
12379                                                                 break
12380                                                 if skip:
12381                                                         continue
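                                              # Editor's note: before the first info file is processed, any existing
                                              # dir index files are moved out of the way so install-info rebuilds the
                                              # index from scratch (ENOENT is ignored if no old index exists).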
12382                                         if processed_count == 0:
12383                                                 for ext in dir_extensions:
12384                                                         try:
12385                                                                 os.rename(dir_file + ext, dir_file + ext + ".old")
12386                                                                 moved_old_dir = True
12387                                                         except EnvironmentError, e:
12388                                                                 if e.errno != errno.ENOENT:
12389                                                                         raise
12390                                                                 del e
12391                                         processed_count += 1
12392                                         myso=commands.getstatusoutput("LANG=C LANGUAGE=C /usr/bin/install-info --dir-file="+inforoot+"/dir "+inforoot+"/"+x)[1]
12393                                         existsstr="already exists, for file `"
12394                                         if myso!="":
12395                                                 if re.search(existsstr,myso):
12396                                                         # Already exists... Don't increment the count for this.
12397                                                         pass
12398                                                 elif myso[:44]=="install-info: warning: no info dir entry in ":
12399                                                         # This info file doesn't contain a DIR-header: install-info produces this
12400                                                         # (harmless) warning (the --quiet switch doesn't seem to work).
12401                                                         # Don't increment the count for this.
12402                                                         pass
12403                                                 else:
12404                                                         badcount=badcount+1
12405                                                         errmsg += myso + "\n"
12406                                         icount=icount+1
12407
12408                                 if moved_old_dir and not os.path.exists(dir_file):
12409                                         # We didn't generate a new dir file, so put the old file
12410                                         # back where it was originally found.
12411                                         for ext in dir_extensions:
12412                                                 try:
12413                                                         os.rename(dir_file + ext + ".old", dir_file + ext)
12414                                                 except EnvironmentError, e:
12415                                                         if e.errno != errno.ENOENT:
12416                                                                 raise
12417                                                         del e
12418
12419                                 # Clean up dir.old cruft so that it does not prevent
12420                                 # unmerging of otherwise empty directories.
12421                                 for ext in dir_extensions:
12422                                         try:
12423                                                 os.unlink(dir_file + ext + ".old")
12424                                         except EnvironmentError, e:
12425                                                 if e.errno != errno.ENOENT:
12426                                                         raise
12427                                                 del e
12428
12429                                 #update mtime so we can potentially avoid regenerating.
12430                                 prev_mtimes[inforoot] = long(os.stat(inforoot).st_mtime)
12431
12432                         if badcount:
12433                                 out.eerror("Processed %d info files; %d errors." % \
12434                                         (icount, badcount))
12435                                 writemsg_level(errmsg, level=logging.ERROR, noiselevel=-1)
12436                         else:
12437                                 if icount > 0:
12438                                         out.einfo("Processed %d info files." % (icount,))
12439
12440
12441 def display_news_notification(root_config, myopts):
12442         target_root = root_config.root
12443         trees = root_config.trees
12444         settings = trees["vartree"].settings
12445         portdb = trees["porttree"].dbapi
12446         vardb = trees["vartree"].dbapi
12447         NEWS_PATH = os.path.join("metadata", "news")
12448         UNREAD_PATH = os.path.join(target_root, NEWS_LIB_PATH, "news")
12449         newsReaderDisplay = False
12450         update = "--pretend" not in myopts
12451
12452         for repo in portdb.getRepositories():
12453                 unreadItems = checkUpdatedNewsItems(
12454                         portdb, vardb, NEWS_PATH, UNREAD_PATH, repo, update=update)
12455                 if unreadItems:
12456                         if not newsReaderDisplay:
12457                                 newsReaderDisplay = True
12458                                 print
12459                         print colorize("WARN", " * IMPORTANT:"),
12460                         print "%s news items need reading for repository '%s'." % (unreadItems, repo)
12461                         
12462         
12463         if newsReaderDisplay:
12464                 print colorize("WARN", " *"),
12465                 print "Use " + colorize("GOOD", "eselect news") + " to read news items."
12466                 print
12467
12468 def display_preserved_libs(vardbapi):
12469         MAX_DISPLAY = 3
12470
12471         # Ensure the registry is consistent with existing files.
12472         vardbapi.plib_registry.pruneNonExisting()
12473
12474         if vardbapi.plib_registry.hasEntries():
12475                 print
12476                 print colorize("WARN", "!!!") + " existing preserved libs:"
12477                 plibdata = vardbapi.plib_registry.getPreservedLibs()
12478                 linkmap = vardbapi.linkmap
12479                 consumer_map = {}
12480                 owners = {}
12481                 linkmap_broken = False
12482
12483                 try:
12484                         linkmap.rebuild()
12485                 except portage.exception.CommandNotFound, e:
12486                         writemsg_level("!!! Command Not Found: %s\n" % (e,),
12487                                 level=logging.ERROR, noiselevel=-1)
12488                         del e
12489                         linkmap_broken = True
12490                 else:
12491                         search_for_owners = set()
12492                         for cpv in plibdata:
12493                                 internal_plib_keys = set(linkmap._obj_key(f) \
12494                                         for f in plibdata[cpv])
12495                                 for f in plibdata[cpv]:
12496                                         if f in consumer_map:
12497                                                 continue
12498                                         consumers = []
12499                                         for c in linkmap.findConsumers(f):
12500                                                 # Filter out any consumers that are also preserved libs
12501                                                 # belonging to the same package as the provider.
12502                                                 if linkmap._obj_key(c) not in internal_plib_keys:
12503                                                         consumers.append(c)
12504                                         consumers.sort()
12505                                         consumer_map[f] = consumers
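                                              # Editor's note: one extra consumer beyond the display limit is kept so
                                              # the display code below can choose between naming the last consumer and
                                              # printing a "used by N other files" summary.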
12506                                         search_for_owners.update(consumers[:MAX_DISPLAY+1])
12507
12508                         owners = vardbapi._owners.getFileOwnerMap(search_for_owners)
12509
12510                 for cpv in plibdata:
12511                         print colorize("WARN", ">>>") + " package: %s" % cpv
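                              # Editor's note: alternate paths that refer to the same on-disk object
                              # (e.g. via symlinks) are grouped so each preserved library is shown once.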
12512                         samefile_map = {}
12513                         for f in plibdata[cpv]:
12514                                 obj_key = linkmap._obj_key(f)
12515                                 alt_paths = samefile_map.get(obj_key)
12516                                 if alt_paths is None:
12517                                         alt_paths = set()
12518                                         samefile_map[obj_key] = alt_paths
12519                                 alt_paths.add(f)
12520
12521                         for alt_paths in samefile_map.itervalues():
12522                                 alt_paths = sorted(alt_paths)
12523                                 for p in alt_paths:
12524                                         print colorize("WARN", " * ") + " - %s" % (p,)
12525                                 f = alt_paths[0]
12526                                 consumers = consumer_map.get(f, [])
12527                                 for c in consumers[:MAX_DISPLAY]:
12528                                         print colorize("WARN", " * ") + "     used by %s (%s)" % \
12529                                                 (c, ", ".join(x.mycpv for x in owners.get(c, [])))
12530                                 if len(consumers) == MAX_DISPLAY + 1:
12531                                         print colorize("WARN", " * ") + "     used by %s (%s)" % \
12532                                                 (consumers[MAX_DISPLAY], ", ".join(x.mycpv \
12533                                                 for x in owners.get(consumers[MAX_DISPLAY], [])))
12534                                 elif len(consumers) > MAX_DISPLAY:
12535                                         print colorize("WARN", " * ") + "     used by %d other files" % (len(consumers) - MAX_DISPLAY)
12536                 print "Use " + colorize("GOOD", "emerge @preserved-rebuild") + " to rebuild packages using these libraries"
12537
12538
12539 def _flush_elog_mod_echo():
12540         """
12541         Dump the mod_echo output now so that our other
12542         notifications are shown last.
12543         @rtype: bool
12544         @returns: True if messages were shown, False otherwise.
12545         """
12546         messages_shown = False
12547         try:
12548                 from portage.elog import mod_echo
12549         except ImportError:
12550                 pass # happens during downgrade to a version without the module
12551         else:
12552                 messages_shown = bool(mod_echo._items)
12553                 mod_echo.finalize()
12554         return messages_shown
12555
12556 def post_emerge(root_config, myopts, mtimedb, retval):
12557         """
12558         Misc. things to run at the end of a merge session.
12559         
12560         Update Info Files
12561         Update Config Files
12562         Update News Items
12563         Commit mtimeDB
12564         Display preserved libs warnings
12565         Exit Emerge
12566
12567         @param root_config: The root configuration for the target ROOT, providing its package databases
12568         @type root_config: RootConfig instance
12569         @param mtimedb: The mtimeDB to store data needed across merge invocations
12570         @type mtimedb: MtimeDB class instance
12571         @param retval: Emerge's return value
12572         @type retval: Int
12573         @rtype: None
12574         @returns:
12575         1.  Calls sys.exit(retval)
12576         """
12577
12578         target_root = root_config.root
12579         trees = { target_root : root_config.trees }
12580         vardbapi = trees[target_root]["vartree"].dbapi
12581         settings = vardbapi.settings
12582         info_mtimes = mtimedb["info"]
12583
12584         # Load the most current variables from ${ROOT}/etc/profile.env
12585         settings.unlock()
12586         settings.reload()
12587         settings.regenerate()
12588         settings.lock()
12589
12590         config_protect = settings.get("CONFIG_PROTECT","").split()
12591         infodirs = settings.get("INFOPATH","").split(":") + \
12592                 settings.get("INFODIR","").split(":")
12593
12594         os.chdir("/")
12595
12596         if retval == os.EX_OK:
12597                 exit_msg = " *** exiting successfully."
12598         else:
12599                 exit_msg = " *** exiting unsuccessfully with status '%s'." % retval
12600         emergelog("notitles" not in settings.features, exit_msg)
12601
12602         _flush_elog_mod_echo()
12603
12604         counter_hash = settings.get("PORTAGE_COUNTER_HASH")
12605         if "--pretend" in myopts or (counter_hash is not None and \
12606                 counter_hash == vardbapi._counter_hash()):
12607                 display_news_notification(root_config, myopts)
12608                 # If vdb state has not changed then there's nothing else to do.
12609                 sys.exit(retval)
12610
12611         vdb_path = os.path.join(target_root, portage.VDB_PATH)
12612         portage.util.ensure_dirs(vdb_path)
12613         vdb_lock = None
12614         if os.access(vdb_path, os.W_OK) and not "--pretend" in myopts:
12615                 vdb_lock = portage.locks.lockdir(vdb_path)
12616
12617         if vdb_lock:
12618                 try:
12619                         if "noinfo" not in settings.features:
12620                                 chk_updated_info_files(target_root,
12621                                         infodirs, info_mtimes, retval)
12622                         mtimedb.commit()
12623                 finally:
12624                         if vdb_lock:
12625                                 portage.locks.unlockdir(vdb_lock)
12626
12627         chk_updated_cfg_files(target_root, config_protect)
12628         
12629         display_news_notification(root_config, myopts)
12630         if retval in (None, os.EX_OK) or (not "--pretend" in myopts):
12631                 display_preserved_libs(vardbapi)        
12632
12633         sys.exit(retval)
12634
12635
12636 def chk_updated_cfg_files(target_root, config_protect):
12637         if config_protect:
12638                 #number of directories with some protect files in them
12639                 procount=0
12640                 for x in config_protect:
12641                         x = os.path.join(target_root, x.lstrip(os.path.sep))
12642                         if not os.access(x, os.W_OK):
12643                                 # Avoid Permission denied errors generated
12644                                 # later by `find`.
12645                                 continue
12646                         try:
12647                                 mymode = os.lstat(x).st_mode
12648                         except OSError:
12649                                 continue
12650                         if stat.S_ISLNK(mymode):
12651                                 # We want to treat it like a directory if it
12652                                 # is a symlink to an existing directory.
12653                                 try:
12654                                         real_mode = os.stat(x).st_mode
12655                                         if stat.S_ISDIR(real_mode):
12656                                                 mymode = real_mode
12657                                 except OSError:
12658                                         pass
12659                         if stat.S_ISDIR(mymode):
12660                                 mycommand = "find '%s' -name '.*' -type d -prune -o -name '._cfg????_*'" % x
12661                         else:
12662                                 mycommand = "find '%s' -maxdepth 1 -name '._cfg????_%s'" % \
12663                                         os.path.split(x.rstrip(os.path.sep))
12664                         mycommand += " ! -name '.*~' ! -iname '.*.bak' -print0"
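                              # Editor's note: for a protected directory this produces a command like
                              # the following ('/etc' is only an illustrative example):
                              #   find '/etc' -name '.*' -type d -prune -o -name '._cfg????_*' ! -name '.*~' ! -iname '.*.bak' -print0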
12665                         a = commands.getstatusoutput(mycommand)
12666                         if a[0] != 0:
12667                                 sys.stderr.write(" %s error scanning '%s': " % (bad("*"), x))
12668                                 sys.stderr.flush()
12669                                 # Show the error message alone, sending stdout to /dev/null.
12670                                 os.system(mycommand + " 1>/dev/null")
12671                         else:
12672                                 files = a[1].split('\0')
12673                                 # split always produces an empty string as the last element
12674                                 if files and not files[-1]:
12675                                         del files[-1]
12676                                 if files:
12677                                         procount += 1
12678                                         print "\n"+colorize("WARN", " * IMPORTANT:"),
12679                                         if stat.S_ISDIR(mymode):
12680                                                  print "%d config files in '%s' need updating." % \
12681                                                         (len(files), x)
12682                                         else:
12683                                                  print "config file '%s' needs updating." % x
12684
12685                 if procount:
12686                         print " "+yellow("*")+" See the "+colorize("INFORM","CONFIGURATION FILES")+ \
12687                                 " section of the " + bold("emerge")
12688                         print " "+yellow("*")+" man page to learn how to update config files."
12689
12690 def checkUpdatedNewsItems(portdb, vardb, NEWS_PATH, UNREAD_PATH, repo_id,
12691         update=False):
12692         """
12693         Examines news items in repodir + '/' + NEWS_PATH and attempts to find unread items.
12694         Returns the number of unread (but relevant) items.
12695         
12696         @param portdb: a portage tree database
12697         @type portdb: portdbapi
12698         @param vardb: an installed package database
12699         @type vardb: vardbapi
12700         @param NEWS_PATH: path to news items, relative to the repository (e.g. "metadata/news")
12701         @type NEWS_PATH: String
12702         @param UNREAD_PATH: path to the directory that stores the list of unread news items
12703         @type UNREAD_PATH: String
12704         @param repo_id: identifier of the repository whose news items should be checked
12705         @type repo_id: String
12706         @rtype: Integer
12707         @returns:
12708         1.  The number of unread but relevant news items.
12709         
12710         """
12711         from portage.news import NewsManager
12712         manager = NewsManager(portdb, vardb, NEWS_PATH, UNREAD_PATH)
12713         return manager.getUnreadItems( repo_id, update=update )
12714
12715 def insert_category_into_atom(atom, category):
12716         alphanum = re.search(r'\w', atom)
12717         if alphanum:
12718                 ret = atom[:alphanum.start()] + "%s/" % category + \
12719                         atom[alphanum.start():]
12720         else:
12721                 ret = None
12722         return ret
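              # Illustrative example (editor's note, names are hypothetical):
              #   insert_category_into_atom(">=foo-1.2", "sys-apps") returns ">=sys-apps/foo-1.2";
              #   if the atom contains no word characters, None is returned.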
12723
12724 def is_valid_package_atom(x):
12725         if "/" not in x:
12726                 alphanum = re.search(r'\w', x)
12727                 if alphanum:
12728                         x = x[:alphanum.start()] + "cat/" + x[alphanum.start():]
12729         return portage.isvalidatom(x)
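              # Illustrative examples (editor's note): "sys-apps/portage" is validated as-is, while a
              # bare name such as "portage" gets a dummy "cat/" prefix before validation.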
12730
12731 def show_blocker_docs_link():
12732         print
12733         print "For more information about " + bad("Blocked Packages") + ", please refer to the following"
12734         print "section of the Gentoo Linux x86 Handbook (architecture is irrelevant):"
12735         print
12736         print "http://www.gentoo.org/doc/en/handbook/handbook-x86.xml?full=1#blocked"
12737         print
12738
12739 def show_mask_docs():
12740         print "For more information, see the MASKED PACKAGES section in the emerge"
12741         print "man page or refer to the Gentoo Handbook."
12742
12743 def action_sync(settings, trees, mtimedb, myopts, myaction):
12744         xterm_titles = "notitles" not in settings.features
12745         emergelog(xterm_titles, " === sync")
12746         myportdir = settings.get("PORTDIR", None)
12747         out = portage.output.EOutput()
12748         if not myportdir:
12749                 sys.stderr.write("!!! PORTDIR is undefined.  Is /etc/make.globals missing?\n")
12750                 sys.exit(1)
12751         if myportdir[-1]=="/":
12752                 myportdir=myportdir[:-1]
12753         try:
12754                 st = os.stat(myportdir)
12755         except OSError:
12756                 st = None
12757         if st is None:
12758                 print ">>>",myportdir,"not found, creating it."
12759                 os.makedirs(myportdir,0755)
12760                 st = os.stat(myportdir)
12761
12762         spawn_kwargs = {}
12763         spawn_kwargs["env"] = settings.environ()
12764         if 'usersync' in settings.features and \
12765                 portage.data.secpass >= 2 and \
12766                 (st.st_uid != os.getuid() and st.st_mode & 0700 or \
12767                 st.st_gid != os.getgid() and st.st_mode & 0070):
12768                 try:
12769                         homedir = pwd.getpwuid(st.st_uid).pw_dir
12770                 except KeyError:
12771                         pass
12772                 else:
12773                         # Drop privileges when syncing, in order to match
12774                         # existing uid/gid settings.
12775                         spawn_kwargs["uid"]    = st.st_uid
12776                         spawn_kwargs["gid"]    = st.st_gid
12777                         spawn_kwargs["groups"] = [st.st_gid]
12778                         spawn_kwargs["env"]["HOME"] = homedir
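                              # Editor's note: start from a umask that permits group write; if PORTDIR
                              # itself is not group-writable, mask out the group write bits as well.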
12779                         umask = 0002
12780                         if not st.st_mode & 0020:
12781                                 umask = umask | 0020
12782                         spawn_kwargs["umask"] = umask
12783
12784         syncuri = settings.get("SYNC", "").strip()
12785         if not syncuri:
12786                 writemsg_level("!!! SYNC is undefined. Is /etc/make.globals missing?\n",
12787                         noiselevel=-1, level=logging.ERROR)
12788                 return 1
12789
12790         vcs_dirs = frozenset([".git", ".svn", "CVS", ".hg"])
12791         vcs_dirs = vcs_dirs.intersection(os.listdir(myportdir))
12792
12793         os.umask(0022)
12794         dosyncuri = syncuri
12795         updatecache_flg = False
12796         if myaction == "metadata":
12797                 print "skipping sync"
12798                 updatecache_flg = True
12799         elif ".git" in vcs_dirs:
12800                 # Update existing git repository, and ignore the syncuri. We are
12801                 # going to trust the user and assume that the user is in the branch
12802                 # that he/she wants updated. We'll let the user manage branches with
12803                 # git directly.
12804                 if portage.process.find_binary("git") is None:
12805                         msg = ["Command not found: git",
12806                         "Type \"emerge dev-util/git\" to enable git support."]
12807                         for l in msg:
12808                                 writemsg_level("!!! %s\n" % l,
12809                                         level=logging.ERROR, noiselevel=-1)
12810                         return 1
12811                 msg = ">>> Starting git pull in %s..." % myportdir
12812                 emergelog(xterm_titles, msg )
12813                 writemsg_level(msg + "\n")
12814                 exitcode = portage.process.spawn_bash("cd %s ; git pull" % \
12815                         (portage._shell_quote(myportdir),), **spawn_kwargs)
12816                 if exitcode != os.EX_OK:
12817                         msg = "!!! git pull error in %s." % myportdir
12818                         emergelog(xterm_titles, msg)
12819                         writemsg_level(msg + "\n", level=logging.ERROR, noiselevel=-1)
12820                         return exitcode
12821                 msg = ">>> Git pull in %s successful" % myportdir
12822                 emergelog(xterm_titles, msg)
12823                 writemsg_level(msg + "\n")
12824                 exitcode = git_sync_timestamps(settings, myportdir)
12825                 if exitcode == os.EX_OK:
12826                         updatecache_flg = True
12827         elif syncuri[:8]=="rsync://":
12828                 for vcs_dir in vcs_dirs:
12829                         writemsg_level(("!!! %s appears to be under revision " + \
12830                                 "control (contains %s).\n!!! Aborting rsync sync.\n") % \
12831                                 (myportdir, vcs_dir), level=logging.ERROR, noiselevel=-1)
12832                         return 1
12833                 if not os.path.exists("/usr/bin/rsync"):
12834                         print "!!! /usr/bin/rsync does not exist, so rsync support is disabled."
12835                         print "!!! Type \"emerge net-misc/rsync\" to enable rsync support."
12836                         sys.exit(1)
12837                 mytimeout=180
12838
12839                 rsync_opts = []
12840                 if settings["PORTAGE_RSYNC_OPTS"] == "":
12841                         portage.writemsg("PORTAGE_RSYNC_OPTS empty or unset, using hardcoded defaults\n")
12842                         rsync_opts.extend([
12843                                 "--recursive",    # Recurse directories
12844                                 "--links",        # Consider symlinks
12845                                 "--safe-links",   # Ignore links outside of tree
12846                                 "--perms",        # Preserve permissions
12847                                 "--times",        # Preserve modification times
12848                                 "--compress",     # Compress the data transmitted
12849                                 "--force",        # Force deletion on non-empty dirs
12850                                 "--whole-file",   # Don't do block transfers, only entire files
12851                                 "--delete",       # Delete files that aren't in the master tree
12852                                 "--stats",        # Show final statistics about what was transferred
12853                                 "--timeout="+str(mytimeout), # IO timeout if not done in X seconds
12854                                 "--exclude=/distfiles",   # Exclude distfiles from consideration
12855                                 "--exclude=/local",       # Exclude local     from consideration
12856                                 "--exclude=/packages",    # Exclude packages  from consideration
12857                         ])
12858
12859                 else:
12860                         # The below validation is not needed when using the above hardcoded
12861                         # defaults.
12862
12863                         portage.writemsg("Using PORTAGE_RSYNC_OPTS instead of hardcoded defaults\n", 1)
12864                         rsync_opts.extend(
12865                                 shlex.split(settings.get("PORTAGE_RSYNC_OPTS","")))
12866                         for opt in ("--recursive", "--times"):
12867                                 if opt not in rsync_opts:
12868                                         portage.writemsg(yellow("WARNING:") + " adding required option " + \
12869                                         "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
12870                                         rsync_opts.append(opt)
12871         
12872                         for exclude in ("distfiles", "local", "packages"):
12873                                 opt = "--exclude=/%s" % exclude
12874                                 if opt not in rsync_opts:
12875                                         portage.writemsg(yellow("WARNING:") + \
12876                                         " adding required option %s not included in "  % opt + \
12877                                         "PORTAGE_RSYNC_OPTS (can be overridden with --exclude='!')\n")
12878                                         rsync_opts.append(opt)
12879         
12880                         if syncuri.rstrip("/").endswith(".gentoo.org/gentoo-portage"):
12881                                 def rsync_opt_startswith(opt_prefix):
12882                                         for x in rsync_opts:
12883                                                 if x.startswith(opt_prefix):
12884                                                         return True
12885                                         return False
12886
12887                                 if not rsync_opt_startswith("--timeout="):
12888                                         rsync_opts.append("--timeout=%d" % mytimeout)
12889
12890                                 for opt in ("--compress", "--whole-file"):
12891                                         if opt not in rsync_opts:
12892                                                 portage.writemsg(yellow("WARNING:") + " adding required option " + \
12893                                                 "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
12894                                                 rsync_opts.append(opt)
12895
12896                 if "--quiet" in myopts:
12897                         rsync_opts.append("--quiet")    # Shut up a lot
12898                 else:
12899                         rsync_opts.append("--verbose")  # Print filelist
12900
12901                 if "--verbose" in myopts:
12902                         rsync_opts.append("--progress")  # Progress meter for each file
12903
12904                 if "--debug" in myopts:
12905                         rsync_opts.append("--checksum") # Force checksum on all files
12906
12907                 # Real local timestamp file.
12908                 servertimestampfile = os.path.join(
12909                         myportdir, "metadata", "timestamp.chk")
12910
12911                 content = portage.util.grabfile(servertimestampfile)
12912                 mytimestamp = 0
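                      # Editor's note: timestamp.chk holds a single date line in the format parsed
                      # below, e.g. "Mon, 01 Jan 2007 00:00:00 +0000" (illustrative value).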
12913                 if content:
12914                         try:
12915                                 mytimestamp = time.mktime(time.strptime(content[0],
12916                                         "%a, %d %b %Y %H:%M:%S +0000"))
12917                         except (OverflowError, ValueError):
12918                                 pass
12919                 del content
12920
12921                 try:
12922                         rsync_initial_timeout = \
12923                                 int(settings.get("PORTAGE_RSYNC_INITIAL_TIMEOUT", "15"))
12924                 except ValueError:
12925                         rsync_initial_timeout = 15
12926
12927                 try:
12928                         maxretries=int(settings["PORTAGE_RSYNC_RETRIES"])
12929                 except SystemExit, e:
12930                         raise # Needed else can't exit
12931                 except:
12932                         maxretries=3 #default number of retries
12933
12934                 retries=0
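                      # Editor's note: split the rsync URI into its components.  user_name keeps its
                      # trailing "@" and port keeps its leading ":" so the pieces can simply be
                      # concatenated again when an IP address is substituted for the hostname.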
12935                 user_name, hostname, port = re.split(
12936                         "rsync://([^:/]+@)?([^:/]*)(:[0-9]+)?", syncuri, maxsplit=3)[1:4]
12937                 if port is None:
12938                         port=""
12939                 if user_name is None:
12940                         user_name=""
12941                 updatecache_flg=True
12942                 all_rsync_opts = set(rsync_opts)
12943                 extra_rsync_opts = shlex.split(
12944                         settings.get("PORTAGE_RSYNC_EXTRA_OPTS",""))
12945                 all_rsync_opts.update(extra_rsync_opts)
12946                 family = socket.AF_INET
12947                 if "-4" in all_rsync_opts or "--ipv4" in all_rsync_opts:
12948                         family = socket.AF_INET
12949                 elif socket.has_ipv6 and \
12950                         ("-6" in all_rsync_opts or "--ipv6" in all_rsync_opts):
12951                         family = socket.AF_INET6
12952                 ips=[]
12953                 SERVER_OUT_OF_DATE = -1
12954                 EXCEEDED_MAX_RETRIES = -2
12955                 while (1):
12956                         if ips:
12957                                 del ips[0]
12958                         if ips==[]:
12959                                 try:
12960                                         for addrinfo in socket.getaddrinfo(
12961                                                 hostname, None, family, socket.SOCK_STREAM):
12962                                                 if socket.has_ipv6 and addrinfo[0] == socket.AF_INET6:
12963                                                         # IPv6 addresses need to be enclosed in square brackets
12964                                                         ips.append("[%s]" % addrinfo[4][0])
12965                                                 else:
12966                                                         ips.append(addrinfo[4][0])
12967                                         from random import shuffle
12968                                         shuffle(ips)
12969                                 except SystemExit, e:
12970                                         raise # Needed else can't exit
12971                                 except Exception, e:
12972                                         print "Notice:",str(e)
12973                                         dosyncuri=syncuri
12974
12975                         if ips:
12976                                 try:
12977                                         dosyncuri = syncuri.replace(
12978                                                 "//" + user_name + hostname + port + "/",
12979                                                 "//" + user_name + ips[0] + port + "/", 1)
12980                                 except SystemExit, e:
12981                                         raise # Needed else can't exit
12982                                 except Exception, e:
12983                                         print "Notice:",str(e)
12984                                         dosyncuri=syncuri
12985
12986                         if (retries==0):
12987                                 if "--ask" in myopts:
12988                                         if userquery("Do you want to sync your Portage tree with the mirror at\n" + blue(dosyncuri) + bold("?"))=="No":
12989                                                 print
12990                                                 print "Quitting."
12991                                                 print
12992                                                 sys.exit(0)
12993                                 emergelog(xterm_titles, ">>> Starting rsync with " + dosyncuri)
12994                                 if "--quiet" not in myopts:
12995                                         print ">>> Starting rsync with "+dosyncuri+"..."
12996                         else:
12997                                 emergelog(xterm_titles,
12998                                         ">>> Starting retry %d of %d with %s" % \
12999                                                 (retries,maxretries,dosyncuri))
13000                                 print "\n\n>>> Starting retry %d of %d with %s" % (retries,maxretries,dosyncuri)
13001
13002                         if mytimestamp != 0 and "--quiet" not in myopts:
13003                                 print ">>> Checking server timestamp ..."
13004
13005                         rsynccommand = ["/usr/bin/rsync"] + rsync_opts + extra_rsync_opts
13006
13007                         if "--debug" in myopts:
13008                                 print rsynccommand
13009
13010                         exitcode = os.EX_OK
13011                         servertimestamp = 0
13012                         # Even if there's no timestamp available locally, fetch the
13013                         # timestamp anyway as an initial probe to verify that the server is
13014                         # responsive.  This protects us from hanging indefinitely on a
13015                         # connection attempt to an unresponsive server which rsync's
13016                         # --timeout option does not prevent.
13017                         if True:
13018                                 # Temporary file for remote server timestamp comparison.
13019                                 from tempfile import mkstemp
13020                                 fd, tmpservertimestampfile = mkstemp()
13021                                 os.close(fd)
13022                                 mycommand = rsynccommand[:]
13023                                 mycommand.append(dosyncuri.rstrip("/") + \
13024                                         "/metadata/timestamp.chk")
13025                                 mycommand.append(tmpservertimestampfile)
13026                                 content = None
13027                                 mypids = []
13028                                 try:
13029                                         def timeout_handler(signum, frame):
13030                                                 raise portage.exception.PortageException("timed out")
13031                                         signal.signal(signal.SIGALRM, timeout_handler)
13032                                         # Timeout here in case the server is unresponsive.  The
13033                                         # --timeout rsync option doesn't apply to the initial
13034                                         # connection attempt.
13035                                         if rsync_initial_timeout:
13036                                                 signal.alarm(rsync_initial_timeout)
13037                                         try:
13038                                                 mypids.extend(portage.process.spawn(
13039                                                         mycommand, env=settings.environ(), returnpid=True))
13040                                                 exitcode = os.waitpid(mypids[0], 0)[1]
13041                                                 content = portage.grabfile(tmpservertimestampfile)
13042                                         finally:
13043                                                 if rsync_initial_timeout:
13044                                                         signal.alarm(0)
13045                                                 try:
13046                                                         os.unlink(tmpservertimestampfile)
13047                                                 except OSError:
13048                                                         pass
13049                                 except portage.exception.PortageException, e:
13050                                         # timed out
13051                                         print e
13052                                         del e
13053                                         if mypids and os.waitpid(mypids[0], os.WNOHANG) == (0,0):
13054                                                 os.kill(mypids[0], signal.SIGTERM)
13055                                                 os.waitpid(mypids[0], 0)
13056                                         # This is the same code rsync uses for timeout.
13057                                         exitcode = 30
13058                                 else:
13059                                         if exitcode != os.EX_OK:
13060                                                 if exitcode & 0xff:
13061                                                         exitcode = (exitcode & 0xff) << 8
13062                                                 else:
13063                                                         exitcode = exitcode >> 8
13064                                 if mypids:
13065                                         portage.process.spawned_pids.remove(mypids[0])
13066                                 if content:
13067                                         try:
13068                                                 servertimestamp = time.mktime(time.strptime(
13069                                                         content[0], "%a, %d %b %Y %H:%M:%S +0000"))
13070                                         except (OverflowError, ValueError):
13071                                                 pass
13072                                 del mycommand, mypids, content
13073                         if exitcode == os.EX_OK:
13074                                 if (servertimestamp != 0) and (servertimestamp == mytimestamp):
13075                                         emergelog(xterm_titles,
13076                                                 ">>> Cancelling sync -- Already current.")
13077                                         print
13078                                         print ">>>"
13079                                         print ">>> Timestamps on the server and in the local repository are the same."
13080                                         print ">>> Cancelling all further sync action. You are already up to date."
13081                                         print ">>>"
13082                                         print ">>> In order to force sync, remove '%s'." % servertimestampfile
13083                                         print ">>>"
13084                                         print
13085                                         sys.exit(0)
13086                                 elif (servertimestamp != 0) and (servertimestamp < mytimestamp):
13087                                         emergelog(xterm_titles,
13088                                                 ">>> Server out of date: %s" % dosyncuri)
13089                                         print
13090                                         print ">>>"
13091                                         print ">>> SERVER OUT OF DATE: %s" % dosyncuri
13092                                         print ">>>"
13093                                         print ">>> In order to force sync, remove '%s'." % servertimestampfile
13094                                         print ">>>"
13095                                         print
13096                                         exitcode = SERVER_OUT_OF_DATE
13097                                 elif (servertimestamp == 0) or (servertimestamp > mytimestamp):
13098                                         # actual sync
13099                                         mycommand = rsynccommand + [dosyncuri+"/", myportdir]
13100                                         exitcode = portage.process.spawn(mycommand, **spawn_kwargs)
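                                              # Editor's note: stop retrying on success or on rsync errors that another
                                              # attempt is unlikely to fix (e.g. syntax, file I/O, killed); other exit
                                              # codes fall through to the retry logic below.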
13101                                         if exitcode in [0,1,3,4,11,14,20,21]:
13102                                                 break
13103                         elif exitcode in [1,3,4,11,14,20,21]:
13104                                 break
13105                         else:
13106                                 # Code 2 indicates protocol incompatibility, which is expected
13107                                 # for servers with protocol < 29 that don't support
13108                                 # --prune-empty-directories.  Retry for a server that supports
13109                                 # at least rsync protocol version 29 (>=rsync-2.6.4).
13110                                 pass
13111
13112                         retries=retries+1
13113
13114                         if retries<=maxretries:
13115                                 print ">>> Retrying..."
13116                                 time.sleep(11)
13117                         else:
13118                                 # over retries
13119                                 # exit loop
13120                                 updatecache_flg=False
13121                                 exitcode = EXCEEDED_MAX_RETRIES
13122                                 break
13123
13124                 if (exitcode==0):
13125                         emergelog(xterm_titles, "=== Sync completed with %s" % dosyncuri)
13126                 elif exitcode == SERVER_OUT_OF_DATE:
13127                         sys.exit(1)
13128                 elif exitcode == EXCEEDED_MAX_RETRIES:
13129                         sys.stderr.write(
13130                                 ">>> Exceeded PORTAGE_RSYNC_RETRIES: %s\n" % maxretries)
13131                         sys.exit(1)
13132                 elif (exitcode>0):
13133                         msg = []
13134                         if exitcode==1:
13135                                 msg.append("Rsync has reported that there is a syntax error. Please ensure")
13136                                 msg.append("that your SYNC statement is proper.")
13137                                 msg.append("SYNC=" + settings["SYNC"])
13138                         elif exitcode==11:
13139                                 msg.append("Rsync has reported that there is a File IO error. Normally")
13140                                 msg.append("this means your disk is full, but can be caused by corruption")
13141                                 msg.append("on the filesystem that contains PORTDIR. Please investigate")
13142                                 msg.append("and try again after the problem has been fixed.")
13143                                 msg.append("PORTDIR=" + settings["PORTDIR"])
13144                         elif exitcode==20:
13145                                 msg.append("Rsync was killed before it finished.")
13146                         else:
13147                                 msg.append("Rsync has not successfully finished. It is recommended that you keep")
13148                                 msg.append("trying or that you use the 'emerge-webrsync' option if you are unable")
13149                                 msg.append("to use rsync due to firewall or other restrictions. This should be a")
13150                                 msg.append("temporary problem unless complications exist with your network")
13151                                 msg.append("(and possibly your system's filesystem) configuration.")
13152                         for line in msg:
13153                                 out.eerror(line)
13154                         sys.exit(exitcode)
13155         elif syncuri[:6]=="cvs://":
13156                 if not os.path.exists("/usr/bin/cvs"):
13157                         print "!!! /usr/bin/cvs does not exist, so CVS support is disabled."
13158                         print "!!! Type \"emerge dev-util/cvs\" to enable CVS support."
13159                         sys.exit(1)
13160                 cvsroot=syncuri[6:]
13161                 cvsdir=os.path.dirname(myportdir)
13162                 if not os.path.exists(myportdir+"/CVS"):
13163                         #initial checkout
13164                         print ">>> Starting initial cvs checkout with "+syncuri+"..."
13165                         if os.path.exists(cvsdir+"/gentoo-x86"):
13166                                 print "!!! existing",cvsdir+"/gentoo-x86 directory; exiting."
13167                                 sys.exit(1)
13168                         try:
13169                                 os.rmdir(myportdir)
13170                         except OSError, e:
13171                                 if e.errno != errno.ENOENT:
13172                                         sys.stderr.write(
13173                                                 "!!! existing '%s' directory; exiting.\n" % myportdir)
13174                                         sys.exit(1)
13175                                 del e
13176                         if portage.spawn("cd "+cvsdir+"; cvs -z0 -d "+cvsroot+" co -P gentoo-x86",settings,free=1):
13177                                 print "!!! cvs checkout error; exiting."
13178                                 sys.exit(1)
13179                         os.rename(os.path.join(cvsdir, "gentoo-x86"), myportdir)
13180                 else:
13181                         #cvs update
13182                         print ">>> Starting cvs update with "+syncuri+"..."
13183                         retval = portage.process.spawn_bash(
13184                                 "cd %s; cvs -z0 -q update -dP" % \
13185                                 (portage._shell_quote(myportdir),), **spawn_kwargs)
13186                         if retval != os.EX_OK:
13187                                 sys.exit(retval)
13188                 dosyncuri = syncuri
13189         else:
13190                 writemsg_level("!!! Unrecognized protocol: SYNC='%s'\n" % (syncuri,),
13191                         noiselevel=-1, level=logging.ERROR)
13192                 return 1
13193
13194         if updatecache_flg and  \
13195                 myaction != "metadata" and \
13196                 "metadata-transfer" not in settings.features:
13197                 updatecache_flg = False
13198
13199         # Reload the whole config from scratch.
13200         settings, trees, mtimedb = load_emerge_config(trees=trees)
13201         root_config = trees[settings["ROOT"]]["root_config"]
13202         portdb = trees[settings["ROOT"]]["porttree"].dbapi
13203
13204         if os.path.exists(myportdir+"/metadata/cache") and updatecache_flg:
13205                 action_metadata(settings, portdb, myopts)
13206
13207         if portage._global_updates(trees, mtimedb["updates"]):
13208                 mtimedb.commit()
13209                 # Reload the whole config from scratch.
13210                 settings, trees, mtimedb = load_emerge_config(trees=trees)
13211                 portdb = trees[settings["ROOT"]]["porttree"].dbapi
13212                 root_config = trees[settings["ROOT"]]["root_config"]
13213
13214         mybestpv = portdb.xmatch("bestmatch-visible",
13215                 portage.const.PORTAGE_PACKAGE_ATOM)
13216         mypvs = portage.best(
13217                 trees[settings["ROOT"]]["vartree"].dbapi.match(
13218                 portage.const.PORTAGE_PACKAGE_ATOM))
13219
13220         chk_updated_cfg_files("/", settings.get("CONFIG_PROTECT","").split())
13221
13222         if myaction != "metadata":
13223                 if os.access(portage.USER_CONFIG_PATH + "/bin/post_sync", os.X_OK):
13224                         retval = portage.process.spawn(
13225                                 [os.path.join(portage.USER_CONFIG_PATH, "bin", "post_sync"),
13226                                 dosyncuri], env=settings.environ())
13227                         if retval != os.EX_OK:
13228                                 print red(" * ") + bold("spawn failed for " + portage.USER_CONFIG_PATH + "/bin/post_sync")
13229
13230         if mybestpv != mypvs and "--quiet" not in myopts:
13231                 print
13232                 print red(" * ")+bold("An update to portage is available.")+" It is _highly_ recommended"
13233                 print red(" * ")+"that you update portage now, before any other packages are updated."
13234                 print
13235                 print red(" * ")+"To update portage, run 'emerge portage' now."
13236                 print
13237         
13238         display_news_notification(root_config, myopts)
13239         return os.EX_OK
13240
13241 def git_sync_timestamps(settings, portdir):
13242         """
13243         Since git doesn't preserve timestamps, synchronize timestamps between
13244         cache entries and ebuilds/eclasses. Assume the cache has the correct timestamp
13245         for a given file as long as the file in the working tree is not modified
13246         (relative to HEAD).
13247         """
13248         cache_dir = os.path.join(portdir, "metadata", "cache")
13249         if not os.path.isdir(cache_dir):
13250                 return os.EX_OK
13251         writemsg_level(">>> Synchronizing timestamps...\n")
13252
13253         from portage.cache.cache_errors import CacheError
13254         try:
13255                 cache_db = settings.load_best_module("portdbapi.metadbmodule")(
13256                         portdir, "metadata/cache", portage.auxdbkeys[:], readonly=True)
13257         except CacheError, e:
13258                 writemsg_level("!!! Unable to instantiate cache: %s\n" % (e,),
13259                         level=logging.ERROR, noiselevel=-1)
13260                 return 1
13261
13262         ec_dir = os.path.join(portdir, "eclass")
13263         try:
13264                 ec_names = set(f[:-7] for f in os.listdir(ec_dir) \
13265                         if f.endswith(".eclass"))
13266         except OSError, e:
13267                 writemsg_level("!!! Unable to list eclasses: %s\n" % (e,),
13268                         level=logging.ERROR, noiselevel=-1)
13269                 return 1
13270
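              # Ask git which files in the working tree are modified relative to
              # HEAD; cache timestamps cannot be trusted for those files.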
13271         args = [portage.const.BASH_BINARY, "-c",
13272                 "cd %s && git diff-index --name-only --diff-filter=M HEAD" % \
13273                 portage._shell_quote(portdir)]
13274         import subprocess
13275         proc = subprocess.Popen(args, stdout=subprocess.PIPE)
13276         modified_files = set(l.rstrip("\n") for l in proc.stdout)
13277         rval = proc.wait()
13278         if rval != os.EX_OK:
13279                 return rval
13280
13281         modified_eclasses = set(ec for ec in ec_names \
13282                 if os.path.join("eclass", ec + ".eclass") in modified_files)
13283
13284         updated_ec_mtimes = {}
13285
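              # Walk every cache entry and, when its ebuild and eclasses are
              # unmodified and consistent, copy the recorded mtimes back onto
              # the files so the cache validates without regeneration.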
13286         for cpv in cache_db:
13287                 cpv_split = portage.catpkgsplit(cpv)
13288                 if cpv_split is None:
13289                         writemsg_level("!!! Invalid cache entry: %s\n" % (cpv,),
13290                                 level=logging.ERROR, noiselevel=-1)
13291                         continue
13292
13293                 cat, pn, ver, rev = cpv_split
13294                 cat, pf = portage.catsplit(cpv)
13295                 relative_eb_path = os.path.join(cat, pn, pf + ".ebuild")
13296                 if relative_eb_path in modified_files:
13297                         continue
13298
13299                 try:
13300                         cache_entry = cache_db[cpv]
13301                         eb_mtime = cache_entry.get("_mtime_")
13302                         ec_mtimes = cache_entry.get("_eclasses_")
13303                 except KeyError:
13304                         writemsg_level("!!! Missing cache entry: %s\n" % (cpv,),
13305                                 level=logging.ERROR, noiselevel=-1)
13306                         continue
13307                 except CacheError, e:
13308                         writemsg_level("!!! Unable to access cache entry: %s %s\n" % \
13309                                 (cpv, e), level=logging.ERROR, noiselevel=-1)
13310                         continue
13311
13312                 if eb_mtime is None:
13313                         writemsg_level("!!! Missing ebuild mtime: %s\n" % (cpv,),
13314                                 level=logging.ERROR, noiselevel=-1)
13315                         continue
13316
13317                 try:
13318                         eb_mtime = long(eb_mtime)
13319                 except ValueError:
13320                         writemsg_level("!!! Invalid ebuild mtime: %s %s\n" % \
13321                                 (cpv, eb_mtime), level=logging.ERROR, noiselevel=-1)
13322                         continue
13323
13324                 if ec_mtimes is None:
13325                         writemsg_level("!!! Missing eclass mtimes: %s\n" % (cpv,),
13326                                 level=logging.ERROR, noiselevel=-1)
13327                         continue
13328
13329                 if modified_eclasses.intersection(ec_mtimes):
13330                         continue
13331
13332                 missing_eclasses = set(ec_mtimes).difference(ec_names)
13333                 if missing_eclasses:
13334                         writemsg_level("!!! Non-existent eclass(es): %s %s\n" % \
13335                                 (cpv, sorted(missing_eclasses)), level=logging.ERROR,
13336                                 noiselevel=-1)
13337                         continue
13338
13339                 eb_path = os.path.join(portdir, relative_eb_path)
13340                 try:
13341                         current_eb_mtime = long(os.stat(eb_path).st_mtime)
13342                 except OSError:
13343                         writemsg_level("!!! Missing ebuild: %s\n" % \
13344                                 (cpv,), level=logging.ERROR, noiselevel=-1)
13345                         continue
13346
13347                 inconsistent = False
13348                 for ec, (ec_path, ec_mtime) in ec_mtimes.iteritems():
13349                         updated_mtime = updated_ec_mtimes.get(ec)
13350                         if updated_mtime is not None and updated_mtime != ec_mtime:
13351                                 writemsg_level("!!! Inconsistent eclass mtime: %s %s\n" % \
13352                                         (cpv, ec), level=logging.ERROR, noiselevel=-1)
13353                                 inconsistent = True
13354                                 break
13355
13356                 if inconsistent:
13357                         continue
13358
13359                 if current_eb_mtime != eb_mtime:
13360                         os.utime(eb_path, (eb_mtime, eb_mtime))
13361
13362                 for ec, (ec_path, ec_mtime) in ec_mtimes.iteritems():
13363                         if ec in updated_ec_mtimes:
13364                                 continue
13365                         ec_path = os.path.join(ec_dir, ec + ".eclass")
13366                         current_mtime = long(os.stat(ec_path).st_mtime)
13367                         if current_mtime != ec_mtime:
13368                                 os.utime(ec_path, (ec_mtime, ec_mtime))
13369                         updated_ec_mtimes[ec] = ec_mtime
13370
13371         return os.EX_OK
13372
13373 def action_metadata(settings, portdb, myopts):
13374         portage.writemsg_stdout("\n>>> Updating Portage cache:      ")
13375         old_umask = os.umask(0002)
13376         cachedir = os.path.normpath(settings.depcachedir)
13377         if cachedir in ["/",    "/bin", "/dev",  "/etc",  "/home",
13378                                         "/lib", "/opt", "/proc", "/root", "/sbin",
13379                                         "/sys", "/tmp", "/usr",  "/var"]:
13380                 print >> sys.stderr, "!!! PORTAGE_DEPCACHEDIR IS SET TO A PRIMARY " + \
13381                         "ROOT DIRECTORY ON YOUR SYSTEM."
13382                 print >> sys.stderr, \
13383                         "!!! This is ALMOST CERTAINLY NOT what you want: '%s'" % cachedir
13384                 sys.exit(73)
13385         if not os.path.exists(cachedir):
13386                 os.mkdir(cachedir)
13387
13388         ec = portage.eclass_cache.cache(portdb.porttree_root)
13389         myportdir = os.path.realpath(settings["PORTDIR"])
13390         cm = settings.load_best_module("portdbapi.metadbmodule")(
13391                 myportdir, "metadata/cache", portage.auxdbkeys[:], readonly=True)
13392
13393         from portage.cache import util
13394
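              # percentage_noise_maker doubles as the cpv source and the progress
              # callback for mirror_cache(), printing a running percentage.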
13395         class percentage_noise_maker(util.quiet_mirroring):
13396                 def __init__(self, dbapi):
13397                         self.dbapi = dbapi
13398                         self.cp_all = dbapi.cp_all()
13399                         l = len(self.cp_all)
13400                         self.call_update_min = 100000000
13401                         self.min_cp_all = l/100.0
13402                         self.count = 1
13403                         self.pstr = ''
13404
13405                 def __iter__(self):
13406                         for x in self.cp_all:
13407                                 self.count += 1
13408                                 if self.count > self.min_cp_all:
13409                                         self.call_update_min = 0
13410                                         self.count = 0
13411                                 for y in self.dbapi.cp_list(x):
13412                                         yield y
13413                         self.call_update_min = 0
13414
13415                 def update(self, *arg):
13416                         try:
13417                                 self.pstr = int(self.pstr) + 1
13418                         except ValueError:
13419                                 self.pstr = 1
13420                         sys.stdout.write("%s%i%%" % \
13421                                 ("\b" * (len(str(self.pstr))+1), self.pstr))
13422                         sys.stdout.flush()
13423                         self.call_update_min = 10000000
13424
13425                 def finish(self, *arg):
13426                         sys.stdout.write("\b\b\b\b100%\n")
13427                         sys.stdout.flush()
13428
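              # With --quiet, feed cpvs straight to mirror_cache() without any
              # progress output; otherwise use the percentage display above.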
13429         if "--quiet" in myopts:
13430                 def quicky_cpv_generator(cp_all_list):
13431                         for x in cp_all_list:
13432                                 for y in portdb.cp_list(x):
13433                                         yield y
13434                 source = quicky_cpv_generator(portdb.cp_all())
13435                 noise_maker = portage.cache.util.quiet_mirroring()
13436         else:
13437                 noise_maker = source = percentage_noise_maker(portdb)
13438         portage.cache.util.mirror_cache(source, cm, portdb.auxdb[myportdir],
13439                 eclass_cache=ec, verbose_instance=noise_maker)
13440
13441         sys.stdout.flush()
13442         os.umask(old_umask)
13443
13444 def action_regen(settings, portdb, max_jobs, max_load):
13445         xterm_titles = "notitles" not in settings.features
13446         emergelog(xterm_titles, " === regen")
13447         #regenerate cache entries
13448         portage.writemsg_stdout("Regenerating cache entries...\n")
13449         try:
13450                 os.close(sys.stdin.fileno())
13451         except SystemExit, e:
13452                 raise # Needed else can't exit
13453         except:
13454                 pass
13455         sys.stdout.flush()
13456
13457         regen = MetadataRegen(portdb, max_jobs=max_jobs, max_load=max_load)
13458         regen.run()
13459
13460         portage.writemsg_stdout("done!\n")
13461         return regen.returncode
13462
13463 def action_config(settings, trees, myopts, myfiles):
13464         if len(myfiles) != 1:
13465                 print red("!!! config can only take a single package atom at this time\n")
13466                 sys.exit(1)
13467         if not is_valid_package_atom(myfiles[0]):
13468                 portage.writemsg("!!! '%s' is not a valid package atom.\n" % myfiles[0],
13469                         noiselevel=-1)
13470                 portage.writemsg("!!! Please check ebuild(5) for full details.\n")
13471                 portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
13472                 sys.exit(1)
13473         print
13474         try:
13475                 pkgs = trees[settings["ROOT"]]["vartree"].dbapi.match(myfiles[0])
13476         except portage.exception.AmbiguousPackageName, e:
13477                 # Multiple matches thrown from cpv_expand
13478                 pkgs = e.args[0]
13479         if len(pkgs) == 0:
13480                 print "No packages found.\n"
13481                 sys.exit(0)
13482         elif len(pkgs) > 1:
13483                 if "--ask" in myopts:
13484                         options = []
13485                         print "Please select a package to configure:"
13486                         idx = 0
13487                         for pkg in pkgs:
13488                                 idx += 1
13489                                 options.append(str(idx))
13490                                 print options[-1]+") "+pkg
13491                         print "X) Cancel"
13492                         options.append("X")
13493                         idx = userquery("Selection?", options)
13494                         if idx == "X":
13495                                 sys.exit(0)
13496                         pkg = pkgs[int(idx)-1]
13497                 else:
13498                         print "The following packages are available:"
13499                         for pkg in pkgs:
13500                                 print "* "+pkg
13501                         print "\nPlease use a specific atom or the --ask option."
13502                         sys.exit(1)
13503         else:
13504                 pkg = pkgs[0]
13505
13506         print
13507         if "--ask" in myopts:
13508                 if userquery("Ready to configure "+pkg+"?") == "No":
13509                         sys.exit(0)
13510         else:
13511                 print "Configuring %s..." % pkg
13512         print
13513         ebuildpath = trees[settings["ROOT"]]["vartree"].dbapi.findname(pkg)
13514         mysettings = portage.config(clone=settings)
13515         vardb = trees[mysettings["ROOT"]]["vartree"].dbapi
13516         debug = mysettings.get("PORTAGE_DEBUG") == "1"
13517         retval = portage.doebuild(ebuildpath, "config", mysettings["ROOT"],
13518                 mysettings,
13519                 debug=debug, cleanup=True,
13520                 mydbapi=trees[settings["ROOT"]]["vartree"].dbapi, tree="vartree")
13521         if retval == os.EX_OK:
13522                 portage.doebuild(ebuildpath, "clean", mysettings["ROOT"],
13523                         mysettings, debug=debug, mydbapi=vardb, tree="vartree")
13524         print
13525
13526 def action_info(settings, trees, myopts, myfiles):
13527         print getportageversion(settings["PORTDIR"], settings["ROOT"],
13528                 settings.profile_path, settings["CHOST"],
13529                 trees[settings["ROOT"]]["vartree"].dbapi)
13530         header_width = 65
13531         header_title = "System Settings"
13532         if myfiles:
13533                 print header_width * "="
13534                 print header_title.rjust(int(header_width/2 + len(header_title)/2))
13535         print header_width * "="
13536         print "System uname: "+platform.platform(aliased=1)
13537
13538         lastSync = portage.grabfile(os.path.join(
13539                 settings["PORTDIR"], "metadata", "timestamp.chk"))
13540         print "Timestamp of tree:",
13541         if lastSync:
13542                 print lastSync[0]
13543         else:
13544                 print "Unknown"
13545
13546         output=commands.getstatusoutput("distcc --version")
13547         if not output[0]:
13548                 print str(output[1].split("\n",1)[0]),
13549                 if "distcc" in settings.features:
13550                         print "[enabled]"
13551                 else:
13552                         print "[disabled]"
13553
13554         output=commands.getstatusoutput("ccache -V")
13555         if not output[0]:
13556                 print str(output[1].split("\n",1)[0]),
13557                 if "ccache" in settings.features:
13558                         print "[enabled]"
13559                 else:
13560                         print "[disabled]"
13561
13562         myvars  = ["sys-devel/autoconf", "sys-devel/automake", "virtual/os-headers",
13563                    "sys-devel/binutils", "sys-devel/libtool",  "dev-lang/python"]
13564         myvars += portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_pkgs")
13565         myvars  = portage.util.unique_array(myvars)
13566         myvars.sort()
13567
13568         for x in myvars:
13569                 if portage.isvalidatom(x):
13570                         pkg_matches = trees["/"]["vartree"].dbapi.match(x)
13571                         pkg_matches = [portage.catpkgsplit(cpv)[1:] for cpv in pkg_matches]
13572                         pkg_matches.sort(key=cmp_sort_key(portage.pkgcmp))
13573                         pkgs = []
13574                         for pn, ver, rev in pkg_matches:
13575                                 if rev != "r0":
13576                                         pkgs.append(ver + "-" + rev)
13577                                 else:
13578                                         pkgs.append(ver)
13579                         if pkgs:
13580                                 pkgs = ", ".join(pkgs)
13581                                 print "%-20s %s" % (x+":", pkgs)
13582                 else:
13583                         print "%-20s %s" % (x+":", "[NOT VALID]")
13584
13585         libtool_vers = ",".join(trees["/"]["vartree"].dbapi.match("sys-devel/libtool"))
13586
13587         if "--verbose" in myopts:
13588                 myvars=settings.keys()
13589         else:
13590                 myvars = ['GENTOO_MIRRORS', 'CONFIG_PROTECT', 'CONFIG_PROTECT_MASK',
13591                           'PORTDIR', 'DISTDIR', 'PKGDIR', 'PORTAGE_TMPDIR',
13592                           'PORTDIR_OVERLAY', 'USE', 'CHOST', 'CFLAGS', 'CXXFLAGS',
13593                           'ACCEPT_KEYWORDS', 'SYNC', 'FEATURES', 'EMERGE_DEFAULT_OPTS']
13594
13595                 myvars.extend(portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_vars"))
13596
13597         myvars = portage.util.unique_array(myvars)
13598         use_expand = settings.get('USE_EXPAND', '').split()
13599         use_expand.sort()
13600         use_expand_hidden = set(
13601                 settings.get('USE_EXPAND_HIDDEN', '').upper().split())
13602         alphabetical_use = '--alphabetical' in myopts
13603         root_config = trees[settings["ROOT"]]['root_config']
13604         unset_vars = []
13605         myvars.sort()
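              # Print each variable; USE is handled specially so that flags
              # belonging to USE_EXPAND variables are listed under those
              # variables instead of under USE itself.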
13606         for x in myvars:
13607                 if x in settings:
13608                         if x != "USE":
13609                                 print '%s="%s"' % (x, settings[x])
13610                         else:
13611                                 use = set(settings["USE"].split())
13612                                 for varname in use_expand:
13613                                         flag_prefix = varname.lower() + "_"
13614                                         for f in list(use):
13615                                                 if f.startswith(flag_prefix):
13616                                                         use.remove(f)
13617                                 use = list(use)
13618                                 use.sort()
13619                                 print 'USE="%s"' % " ".join(use),
13620                                 for varname in use_expand:
13621                                         myval = settings.get(varname)
13622                                         if myval:
13623                                                 print '%s="%s"' % (varname, myval),
13624                                 print
13625                 else:
13626                         unset_vars.append(x)
13627         if unset_vars:
13628                 print "Unset:  "+", ".join(unset_vars)
13629         print
13630
13631         if "--debug" in myopts:
13632                 for x in dir(portage):
13633                         module = getattr(portage, x)
13634                         if "cvs_id_string" in dir(module):
13635                                 print "%s: %s" % (str(x), str(module.cvs_id_string))
13636
13637         # See if we can find any packages installed matching the strings
13638         # passed on the command line
13639         mypkgs = []
13640         vardb = trees[settings["ROOT"]]["vartree"].dbapi
13641         portdb = trees[settings["ROOT"]]["porttree"].dbapi
13642         for x in myfiles:
13643                 mypkgs.extend(vardb.match(x))
13644
13645         # If some packages were found...
13646         if mypkgs:
13647                 # Get our global settings (we only print stuff if it varies from
13648                 # the current config)
13649                 mydesiredvars = [ 'CHOST', 'CFLAGS', 'CXXFLAGS', 'LDFLAGS' ]
13650                 auxkeys = mydesiredvars + list(vardb._aux_cache_keys)
13651                 auxkeys.append('DEFINED_PHASES')
13652                 global_vals = {}
13653                 pkgsettings = portage.config(clone=settings)
13654
13655                 for myvar in mydesiredvars:
13656                         global_vals[myvar] = set(settings.get(myvar, "").split())
13657
13658                 # Loop through each package
13659                 # Only print settings if they differ from global settings
13660                 header_title = "Package Settings"
13661                 print header_width * "="
13662                 print header_title.rjust(int(header_width/2 + len(header_title)/2))
13663                 print header_width * "="
13664                 from portage.output import EOutput
13665                 out = EOutput()
13666                 for cpv in mypkgs:
13667                         # Get all package specific variables
13668                         metadata = dict(izip(auxkeys, vardb.aux_get(cpv, auxkeys)))
13669                         pkg = Package(built=True, cpv=cpv,
13670                                 installed=True, metadata=izip(Package.metadata_keys,
13671                                 (metadata.get(x, '') for x in Package.metadata_keys)),
13672                                 root_config=root_config, type_name='installed')
13673                         valuesmap = {}
13674                         for k in auxkeys:
13675                                 valuesmap[k] = set(metadata[k].split())
13676
13677                         diff_values = {}
13678                         for myvar in mydesiredvars:
13679                                 # If the package variable doesn't match the
13680                                 # current global variable, something has changed
13681                                 # so set diff_found so we know to print
13682                                 if valuesmap[myvar] != global_vals[myvar]:
13683                                         diff_values[myvar] = valuesmap[myvar]
13684
13685                         print "\n%s was built with the following:" % \
13686                                 colorize("INFORM", str(pkg.cpv))
13687
13688                         pkgsettings.setcpv(pkg)
13689                         forced_flags = set(chain(pkgsettings.useforce,
13690                                 pkgsettings.usemask))
13691                         use = set(pkg.use.enabled)
13692                         use.discard(pkgsettings.get('ARCH'))
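                              # Partition the package's USE flags into their USE_EXPAND
                              # groups so each group can be printed as its own variable;
                              # whatever is left over is shown as plain USE.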
13693                         use_expand_flags = set()
13694                         use_enabled = {}
13695                         use_disabled = {}
13696                         for varname in use_expand:
13697                                 flag_prefix = varname.lower() + "_"
13698                                 for f in use:
13699                                         if f.startswith(flag_prefix):
13700                                                 use_expand_flags.add(f)
13701                                                 use_enabled.setdefault(
13702                                                         varname.upper(), []).append(f[len(flag_prefix):])
13703
13704                                 for f in pkg.iuse.all:
13705                                         if f.startswith(flag_prefix):
13706                                                 use_expand_flags.add(f)
13707                                                 if f not in use:
13708                                                         use_disabled.setdefault(
13709                                                                 varname.upper(), []).append(f[len(flag_prefix):])
13710
13711                         var_order = set(use_enabled)
13712                         var_order.update(use_disabled)
13713                         var_order = sorted(var_order)
13714                         var_order.insert(0, 'USE')
13715                         use.difference_update(use_expand_flags)
13716                         use_enabled['USE'] = list(use)
13717                         use_disabled['USE'] = []
13718
13719                         for f in pkg.iuse.all:
13720                                 if f not in use and \
13721                                         f not in use_expand_flags:
13722                                         use_disabled['USE'].append(f)
13723
13724                         for varname in var_order:
13725                                 if varname in use_expand_hidden:
13726                                         continue
13727                                 flags = []
13728                                 for f in use_enabled.get(varname, []):
13729                                         flags.append(UseFlagDisplay(f, True, f in forced_flags))
13730                                 for f in use_disabled.get(varname, []):
13731                                         flags.append(UseFlagDisplay(f, False, f in forced_flags))
13732                                 if alphabetical_use:
13733                                         flags.sort(key=UseFlagDisplay.sort_combined)
13734                                 else:
13735                                         flags.sort(key=UseFlagDisplay.sort_separated)
13736                                 print '%s="%s"' % (varname, ' '.join(str(f) for f in flags)),
13737                         print
13738
13739                         # If a difference was found, print the info for
13740                         # this package.
13741                         if diff_values:
13742                                 # Print package info
13743                                 for myvar in mydesiredvars:
13744                                         if myvar in diff_values:
13745                                                 mylist = list(diff_values[myvar])
13746                                                 mylist.sort()
13747                                                 print "%s=\"%s\"" % (myvar, " ".join(mylist))
13748                         print
13749
13750                         if metadata['DEFINED_PHASES']:
13751                                 if 'info' not in metadata['DEFINED_PHASES'].split():
13752                                         continue
13753
13754                         print ">>> Attempting to run pkg_info() for '%s'" % pkg.cpv
13755                         ebuildpath = vardb.findname(pkg.cpv)
13756                         if not ebuildpath or not os.path.exists(ebuildpath):
13757                                 out.ewarn("No ebuild found for '%s'" % pkg.cpv)
13758                                 continue
13759                         portage.doebuild(ebuildpath, "info", pkgsettings["ROOT"],
13760                                 pkgsettings, debug=(settings.get("PORTAGE_DEBUG", "") == "1"),
13761                                 mydbapi=trees[settings["ROOT"]]["vartree"].dbapi,
13762                                 tree="vartree")
13763
13764 def action_search(root_config, myopts, myfiles, spinner):
13765         if not myfiles:
13766                 print "emerge: no search terms provided."
13767         else:
13768                 searchinstance = search(root_config,
13769                         spinner, "--searchdesc" in myopts,
13770                         "--quiet" not in myopts, "--usepkg" in myopts,
13771                         "--usepkgonly" in myopts)
13772                 for mysearch in myfiles:
13773                         try:
13774                                 searchinstance.execute(mysearch)
13775                         except re.error, comment:
13776                                 print "\n!!! Regular expression error in \"%s\": %s" % ( mysearch, comment )
13777                                 sys.exit(1)
13778                         searchinstance.output()
13779
13780 def action_uninstall(settings, trees, ldpath_mtimes,
13781         opts, action, files, spinner):
13782
13783         # For backward compat, some actions do not require leading '='.
13784         ignore_missing_eq = action in ('clean', 'unmerge')
13785         vardb = trees[settings["ROOT"]]["vartree"].dbapi
13786         valid_atoms = []
13787
13788         # Ensure atoms are valid before calling unmerge().
13789         # For backward compat, leading '=' is not required.
13790         for x in files:
13791                 if not (is_valid_package_atom(x) or \
13792                         (ignore_missing_eq and is_valid_package_atom("=" + x))):
13793
13794                         msg = []
13795                         msg.append("'%s' is not a valid package atom." % (x,))
13796                         msg.append("Please check ebuild(5) for full details.")
13797                         writemsg_level("".join("!!! %s\n" % line for line in msg),
13798                                 level=logging.ERROR, noiselevel=-1)
13799                         return 1
13800
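                      # dep_expand() fills in the category for short names; if the
                      # short name matches packages in several categories it raises
                      # AmbiguousPackageName, reported below.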
13801                 try:
13802                         valid_atoms.append(
13803                                 portage.dep_expand(x, mydb=vardb, settings=settings))
13804                 except portage.exception.AmbiguousPackageName, e:
13805                         msg = "The short ebuild name \"" + x + \
13806                                 "\" is ambiguous.  Please specify " + \
13807                                 "one of the following " + \
13808                                 "fully-qualified ebuild names instead:"
13809                         for line in textwrap.wrap(msg, 70):
13810                                 writemsg_level("!!! %s\n" % (line,),
13811                                         level=logging.ERROR, noiselevel=-1)
13812                         for i in e[0]:
13813                                 writemsg_level("    %s\n" % colorize("INFORM", i),
13814                                         level=logging.ERROR, noiselevel=-1)
13815                         writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
13816                         return 1
13817
13818         if action in ('clean', 'unmerge') or \
13819                 (action == 'prune' and "--nodeps" in opts):
13820                 # When given a list of atoms, unmerge them in the order given.
13821                 ordered = action == 'unmerge'
13822                 unmerge(trees[settings["ROOT"]]['root_config'], opts, action,
13823                         valid_atoms, ldpath_mtimes, ordered=ordered)
13824                 rval = os.EX_OK
13825         else:
13826                 rval = action_depclean(settings, trees, ldpath_mtimes,
13827                         opts, action, valid_atoms, spinner)
13828
13829         return rval
13830
13831 def action_depclean(settings, trees, ldpath_mtimes,
13832         myopts, action, myfiles, spinner):
13833         # Kill packages that aren't explicitly merged and aren't required as a
13834         # dependency of another package. The world file counts as explicit.
13835
13836         # Global depclean or prune operations are not very safe when there are
13837         # missing dependencies since it's unknown how badly incomplete
13838         # the dependency graph is, and we might accidentally remove packages
13839         # that should have been pulled into the graph. On the other hand, it's
13840         # relatively safe to ignore missing deps when only asked to remove
13841         # specific packages.
13842         allow_missing_deps = len(myfiles) > 0
13843
13844         msg = []
13845         msg.append("Always study the list of packages to be cleaned for any obvious\n")
13846         msg.append("mistakes. Packages that are part of the world set will always\n")
13847         msg.append("be kept.  They can be manually added to this set with\n")
13848         msg.append(good("`emerge --noreplace <atom>`") + ".  Packages that are listed in\n")
13849         msg.append("package.provided (see portage(5)) will be removed by\n")
13850         msg.append("depclean, even if they are part of the world set.\n")
13851         msg.append("\n")
13852         msg.append("As a safety measure, depclean will not remove any packages\n")
13853         msg.append("unless *all* required dependencies have been resolved.  As a\n")
13854         msg.append("consequence, it is often necessary to run %s\n" % \
13855                 good("`emerge --update"))
13856         msg.append(good("--newuse --deep @system @world`") + \
13857                 " prior to depclean.\n")
13858
13859         if action == "depclean" and "--quiet" not in myopts and not myfiles:
13860                 portage.writemsg_stdout("\n")
13861                 for x in msg:
13862                         portage.writemsg_stdout(colorize("WARN", " * ") + x)
13863
13864         xterm_titles = "notitles" not in settings.features
13865         myroot = settings["ROOT"]
13866         root_config = trees[myroot]["root_config"]
13867         getSetAtoms = root_config.setconfig.getSetAtoms
13868         vardb = trees[myroot]["vartree"].dbapi
13869
13870         required_set_names = ("system", "world")
13871         required_sets = {}
13872         set_args = []
13873
13874         for s in required_set_names:
13875                 required_sets[s] = InternalPackageSet(
13876                         initial_atoms=getSetAtoms(s))
13877
13878         
13879         # When removing packages, use a temporary version of world
13880         # which excludes packages that are intended to be eligible for
13881         # removal.
13882         world_temp_set = required_sets["world"]
13883         system_set = required_sets["system"]
13884
13885         if not system_set or not world_temp_set:
13886
13887                 if not system_set:
13888                         writemsg_level("!!! You have no system list.\n",
13889                                 level=logging.ERROR, noiselevel=-1)
13890
13891                 if not world_temp_set:
13892                         writemsg_level("!!! You have no world file.\n",
13893                                         level=logging.WARNING, noiselevel=-1)
13894
13895                 writemsg_level("!!! Proceeding is likely to " + \
13896                         "break your installation.\n",
13897                         level=logging.WARNING, noiselevel=-1)
13898                 if "--pretend" not in myopts:
13899                         countdown(int(settings["EMERGE_WARNING_DELAY"]), ">>> Depclean")
13900
13901         if action == "depclean":
13902                 emergelog(xterm_titles, " >>> depclean")
13903
13904         import textwrap
13905         args_set = InternalPackageSet()
13906         if myfiles:
13907                 args_set.update(myfiles)
13908                 matched_packages = False
13909                 for x in args_set:
13910                         if vardb.match(x):
13911                                 matched_packages = True
13912                                 break
13913                 if not matched_packages:
13914                         writemsg_level(">>> No packages selected for removal by %s\n" % \
13915                                 action)
13916                         return
13917
13918         writemsg_level("\nCalculating dependencies  ")
13919         resolver_params = create_depgraph_params(myopts, "remove")
13920         resolver = depgraph(settings, trees, myopts, resolver_params, spinner)
13921         vardb = resolver.trees[myroot]["vartree"].dbapi
13922
13923         if action == "depclean":
13924
13925                 if args_set:
13926                         # Pull in everything that's installed but not matched
13927                         # by an argument atom since we don't want to clean any
13928                         # package if something depends on it.
13929
13930                         world_temp_set.clear()
13931                         for pkg in vardb:
13932                                 spinner.update()
13933
13934                                 try:
13935                                         if args_set.findAtomForPackage(pkg) is None:
13936                                                 world_temp_set.add("=" + pkg.cpv)
13937                                                 continue
13938                                 except portage.exception.InvalidDependString, e:
13939                                         show_invalid_depstring_notice(pkg,
13940                                                 pkg.metadata["PROVIDE"], str(e))
13941                                         del e
13942                                         world_temp_set.add("=" + pkg.cpv)
13943                                         continue
13944
13945         elif action == "prune":
13946
13947                 # Pull in everything that's installed since we don't want
13948                 # to prune a package if something depends on it.
13949                 world_temp_set.clear()
13950                 world_temp_set.update(vardb.cp_all())
13951
13952                 if not args_set:
13953
13954                         # Try to prune everything that's slotted.
13955                         for cp in vardb.cp_all():
13956                                 if len(vardb.cp_list(cp)) > 1:
13957                                         args_set.add(cp)
13958
13959                 # Remove atoms from world that match installed packages
13960                 # that are also matched by argument atoms, but do not remove
13961                 # them if they match the highest installed version.
13962                 for pkg in vardb:
13963                         spinner.update()
13964                         pkgs_for_cp = vardb.match_pkgs(pkg.cp)
13965                         if not pkgs_for_cp or pkg not in pkgs_for_cp:
13966                                 raise AssertionError("package expected in matches: " + \
13967                                         "cp = %s, cpv = %s matches = %s" % \
13968                                         (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
13969
13970                         highest_version = pkgs_for_cp[-1]
13971                         if pkg == highest_version:
13972                                 # pkg is the highest version
13973                                 world_temp_set.add("=" + pkg.cpv)
13974                                 continue
13975
13976                         if len(pkgs_for_cp) <= 1:
13977                                 raise AssertionError("more packages expected: " + \
13978                                         "cp = %s, cpv = %s matches = %s" % \
13979                                         (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
13980
13981                         try:
13982                                 if args_set.findAtomForPackage(pkg) is None:
13983                                         world_temp_set.add("=" + pkg.cpv)
13984                                         continue
13985                         except portage.exception.InvalidDependString, e:
13986                                 show_invalid_depstring_notice(pkg,
13987                                         pkg.metadata["PROVIDE"], str(e))
13988                                 del e
13989                                 world_temp_set.add("=" + pkg.cpv)
13990                                 continue
13991
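              # Seed the resolver with the (possibly trimmed) system and world
              # sets so everything they require is pulled into the graph before
              # removal candidates are computed.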
13992         set_args = {}
13993         for s, package_set in required_sets.iteritems():
13994                 set_atom = SETPREFIX + s
13995                 set_arg = SetArg(arg=set_atom, set=package_set,
13996                         root_config=resolver.roots[myroot])
13997                 set_args[s] = set_arg
13998                 for atom in set_arg.set:
13999                         resolver._dep_stack.append(
14000                                 Dependency(atom=atom, root=myroot, parent=set_arg))
14001                         resolver.digraph.add(set_arg, None)
14002
14003         success = resolver._complete_graph()
14004         writemsg_level("\b\b... done!\n")
14005
14006         resolver.display_problems()
14007
14008         if not success:
14009                 return 1
14010
14011         def unresolved_deps():
14012
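                      # Collect hard dependencies that could not be satisfied by any
                      # installed package; unless specific packages were given on the
                      # command line, any such dependency aborts the operation.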
14013                 unresolvable = set()
14014                 for dep in resolver._initially_unsatisfied_deps:
14015                         if isinstance(dep.parent, Package) and \
14016                                 (dep.priority > UnmergeDepPriority.SOFT):
14017                                 unresolvable.add((dep.atom, dep.parent.cpv))
14018
14019                 if not unresolvable:
14020                         return False
14021
14022                 if unresolvable and not allow_missing_deps:
14023                         prefix = bad(" * ")
14024                         msg = []
14025                         msg.append("Dependencies could not be completely resolved due to")
14026                         msg.append("the following required packages not being installed:")
14027                         msg.append("")
14028                         for atom, parent in unresolvable:
14029                                 msg.append("  %s pulled in by:" % (atom,))
14030                                 msg.append("    %s" % (parent,))
14031                                 msg.append("")
14032                         msg.append("Have you forgotten to run " + \
14033                                 good("`emerge --update --newuse --deep @system @world`") + " prior")
14034                         msg.append(("to %s? It may be necessary to manually " + \
14035                                 "uninstall packages that no longer") % action)
14036                         msg.append("exist in the portage tree since " + \
14037                                 "it may not be possible to satisfy their")
14038                         msg.append("dependencies.  Also, be aware of " + \
14039                                 "the --with-bdeps option that is documented")
14040                         msg.append("in " + good("`man emerge`") + ".")
14041                         if action == "prune":
14042                                 msg.append("")
14043                                 msg.append("If you would like to ignore " + \
14044                                         "dependencies then use %s." % good("--nodeps"))
14045                         writemsg_level("".join("%s%s\n" % (prefix, line) for line in msg),
14046                                 level=logging.ERROR, noiselevel=-1)
14047                         return True
14048                 return False
14049
14050         if unresolved_deps():
14051                 return 1
14052
14053         graph = resolver.digraph.copy()
14054         required_pkgs_total = 0
14055         for node in graph:
14056                 if isinstance(node, Package):
14057                         required_pkgs_total += 1
14058
14059         def show_parents(child_node):
14060                 parent_nodes = graph.parent_nodes(child_node)
14061                 if not parent_nodes:
14062                         # With --prune, the highest version can be pulled in without any
14063                         # real parent since all installed packages are pulled in.  In that
14064                         # case there's nothing to show here.
14065                         return
14066                 parent_strs = []
14067                 for node in parent_nodes:
14068                         parent_strs.append(str(getattr(node, "cpv", node)))
14069                 parent_strs.sort()
14070                 msg = []
14071                 msg.append("  %s pulled in by:\n" % (child_node.cpv,))
14072                 for parent_str in parent_strs:
14073                         msg.append("    %s\n" % (parent_str,))
14074                 msg.append("\n")
14075                 portage.writemsg_stdout("".join(msg), noiselevel=-1)
14076
14077         def cmp_pkg_cpv(pkg1, pkg2):
14078                 """Sort Package instances by cpv."""
14079                 if pkg1.cpv > pkg2.cpv:
14080                         return 1
14081                 elif pkg1.cpv == pkg2.cpv:
14082                         return 0
14083                 else:
14084                         return -1
14085
14086         def create_cleanlist():
14087                 pkgs_to_remove = []
14088
14089                 if action == "depclean":
14090                         if args_set:
14091
14092                                 for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)):
14093                                         arg_atom = None
14094                                         try:
14095                                                 arg_atom = args_set.findAtomForPackage(pkg)
14096                                         except portage.exception.InvalidDependString:
14097                                                 # this error has already been displayed by now
14098                                                 continue
14099
14100                                         if arg_atom:
14101                                                 if pkg not in graph:
14102                                                         pkgs_to_remove.append(pkg)
14103                                                 elif "--verbose" in myopts:
14104                                                         show_parents(pkg)
14105
14106                         else:
14107                                 for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)):
14108                                         if pkg not in graph:
14109                                                 pkgs_to_remove.append(pkg)
14110                                         elif "--verbose" in myopts:
14111                                                 show_parents(pkg)
14112
14113                 elif action == "prune":
14114                         # Prune really uses all installed instead of world. It's not
14115                         # a real reverse dependency so don't display it as such.
14116                         graph.remove(set_args["world"])
14117
14118                         for atom in args_set:
14119                                 for pkg in vardb.match_pkgs(atom):
14120                                         if pkg not in graph:
14121                                                 pkgs_to_remove.append(pkg)
14122                                         elif "--verbose" in myopts:
14123                                                 show_parents(pkg)
14124
14125                 if not pkgs_to_remove:
14126                         writemsg_level(
14127                                 ">>> No packages selected for removal by %s\n" % action)
14128                         if "--verbose" not in myopts:
14129                                 writemsg_level(
14130                                         ">>> To see reverse dependencies, use %s\n" % \
14131                                                 good("--verbose"))
14132                         if action == "prune":
14133                                 writemsg_level(
14134                                         ">>> To ignore dependencies, use %s\n" % \
14135                                                 good("--nodeps"))
14136
14137                 return pkgs_to_remove
14138
14139         cleanlist = create_cleanlist()
14140
14141         if len(cleanlist):
14142                 clean_set = set(cleanlist)
14143
14144                 # Check if any of these packages are the sole providers of libraries
14145                 # with consumers that have not been selected for removal. If so, these
14146                 # packages and any dependencies need to be added to the graph.
14147                 real_vardb = trees[myroot]["vartree"].dbapi
14148                 linkmap = real_vardb.linkmap
14149                 liblist = linkmap.listLibraryObjects()
14150                 consumer_cache = {}
14151                 provider_cache = {}
14152                 soname_cache = {}
14153                 consumer_map = {}
14154
14155                 writemsg_level(">>> Checking for lib consumers...\n")
14156
14157                 for pkg in cleanlist:
14158                         pkg_dblink = real_vardb._dblink(pkg.cpv)
14159                         provided_libs = set()
14160
14161                         for lib in liblist:
14162                                 if pkg_dblink.isowner(lib, myroot):
14163                                         provided_libs.add(lib)
14164
14165                         if not provided_libs:
14166                                 continue
14167
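                              # Map each library this package provides to the files that
                              # link against it, ignoring consumers owned by the package
                              # itself.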
14168                         consumers = {}
14169                         for lib in provided_libs:
14170                                 lib_consumers = consumer_cache.get(lib)
14171                                 if lib_consumers is None:
14172                                         lib_consumers = linkmap.findConsumers(lib)
14173                                         consumer_cache[lib] = lib_consumers
14174                                 if lib_consumers:
14175                                         consumers[lib] = lib_consumers
14176
14177                         if not consumers:
14178                                 continue
14179
14180                         for lib, lib_consumers in consumers.items():
14181                                 for consumer_file in list(lib_consumers):
14182                                         if pkg_dblink.isowner(consumer_file, myroot):
14183                                                 lib_consumers.remove(consumer_file)
14184                                 if not lib_consumers:
14185                                         del consumers[lib]
14186
14187                         if not consumers:
14188                                 continue
14189
14190                         for lib, lib_consumers in consumers.iteritems():
14191
14192                                 soname = soname_cache.get(lib)
14193                                 if soname is None:
14194                                         soname = linkmap.getSoname(lib)
14195                                         soname_cache[lib] = soname
14196
14197                                 consumer_providers = []
14198                                 for lib_consumer in lib_consumers:
14199                                         providers = provider_cache.get(lib_consumer)
14200                                         if providers is None:
14201                                                 providers = linkmap.findProviders(lib_consumer)
14202                                                 provider_cache[lib_consumer] = providers
14203                                         if soname not in providers:
14204                                                 # Why does this happen?
14205                                                 continue
14206                                         consumer_providers.append(
14207                                                 (lib_consumer, providers[soname]))
14208
14209                                 consumers[lib] = consumer_providers
14210
14211                         consumer_map[pkg] = consumers
14212
14213                 if consumer_map:
14214
14215                         search_files = set()
14216                         for consumers in consumer_map.itervalues():
14217                                 for lib, consumer_providers in consumers.iteritems():
14218                                         for lib_consumer, providers in consumer_providers:
14219                                                 search_files.add(lib_consumer)
14220                                                 search_files.update(providers)
14221
14222                         writemsg_level(">>> Assigning files to packages...\n")
14223                         file_owners = real_vardb._owners.getFileOwnerMap(search_files)
14224
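                              # Keep only consumers whose library would have no remaining
                              # provider after the removal, and which are not themselves
                              # scheduled for removal.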
14225                         for pkg, consumers in consumer_map.items():
14226                                 for lib, consumer_providers in consumers.items():
14227                                         lib_consumers = set()
14228
14229                                         for lib_consumer, providers in consumer_providers:
14230                                                 owner_set = file_owners.get(lib_consumer)
14231                                                 provider_dblinks = set()
14232                                                 provider_pkgs = set()
14233
14234                                                 if len(providers) > 1:
14235                                                         for provider in providers:
14236                                                                 provider_set = file_owners.get(provider)
14237                                                                 if provider_set is not None:
14238                                                                         provider_dblinks.update(provider_set)
14239
14240                                                 if len(provider_dblinks) > 1:
14241                                                         for provider_dblink in provider_dblinks:
14242                                                                 pkg_key = ("installed", myroot,
14243                                                                         provider_dblink.mycpv, "nomerge")
14244                                                                 if pkg_key not in clean_set:
14245                                                                         provider_pkgs.add(vardb.get(pkg_key))
14246
14247                                                 if provider_pkgs:
14248                                                         continue
14249
14250                                                 if owner_set is not None:
14251                                                         lib_consumers.update(owner_set)
14252
14253                                         for consumer_dblink in list(lib_consumers):
14254                                                 if ("installed", myroot, consumer_dblink.mycpv,
14255                                                         "nomerge") in clean_set:
14256                                                         lib_consumers.remove(consumer_dblink)
14257                                                         continue
14258
14259                                         if lib_consumers:
14260                                                 consumers[lib] = lib_consumers
14261                                         else:
14262                                                 del consumers[lib]
14263                                 if not consumers:
14264                                         del consumer_map[pkg]
14265
14266                 if consumer_map:
14267                         # TODO: Implement a package set for rebuilding consumer packages.
14268
14269                         msg = "In order to avoid breakage of link level " + \
14270                                 "dependencies, one or more packages will not be removed. " + \
14271                                 "This can be solved by rebuilding " + \
14272                                 "the packages that pulled them in."
14273
14274                         prefix = bad(" * ")
14275                         from textwrap import wrap
14276                         writemsg_level("".join(prefix + "%s\n" % line for \
14277                                 line in wrap(msg, 70)), level=logging.WARNING, noiselevel=-1)
14278
14279                         msg = []
14280                         for pkg, consumers in consumer_map.iteritems():
14281                                 unique_consumers = set(chain(*consumers.values()))
14282                                 unique_consumers = sorted(consumer.mycpv \
14283                                         for consumer in unique_consumers)
14284                                 msg.append("")
14285                                 msg.append("  %s pulled in by:" % (pkg.cpv,))
14286                                 for consumer in unique_consumers:
14287                                         msg.append("    %s" % (consumer,))
14288                         msg.append("")
14289                         writemsg_level("".join(prefix + "%s\n" % line for line in msg),
14290                                 level=logging.WARNING, noiselevel=-1)
14291
14292                         # Add lib providers to the graph as children of lib consumers,
14293                         # and also add any dependencies pulled in by the provider.
14294                         writemsg_level(">>> Adding lib providers to graph...\n")
14295
14296                         for pkg, consumers in consumer_map.iteritems():
14297                                 for consumer_dblink in set(chain(*consumers.values())):
14298                                         consumer_pkg = vardb.get(("installed", myroot,
14299                                                 consumer_dblink.mycpv, "nomerge"))
14300                                         if not resolver._add_pkg(pkg,
14301                                                 Dependency(parent=consumer_pkg,
14302                                                 priority=UnmergeDepPriority(runtime=True),
14303                                                 root=pkg.root)):
14304                                                 resolver.display_problems()
14305                                                 return 1
14306
14307                         writemsg_level("\nCalculating dependencies  ")
14308                         success = resolver._complete_graph()
14309                         writemsg_level("\b\b... done!\n")
14310                         resolver.display_problems()
14311                         if not success:
14312                                 return 1
14313                         if unresolved_deps():
14314                                 return 1
14315
14316                         graph = resolver.digraph.copy()
14317                         required_pkgs_total = 0
14318                         for node in graph:
14319                                 if isinstance(node, Package):
14320                                         required_pkgs_total += 1
14321                         cleanlist = create_cleanlist()
14322                         if not cleanlist:
14323                                 return 0
14324                         clean_set = set(cleanlist)
14325
14326                 # Use a topological sort to create an unmerge order such that
14327                 # each package is unmerged before its dependencies. This is
14328                 # necessary to avoid breaking things that may need to run
14329                 # during pkg_prerm or pkg_postrm phases.
14330
14331                 # Create a new graph to account for dependencies between the
14332                 # packages being unmerged.
14333                 graph = digraph()
14334                 del cleanlist[:]
14335
14336                 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
14337                 runtime = UnmergeDepPriority(runtime=True)
14338                 runtime_post = UnmergeDepPriority(runtime_post=True)
14339                 buildtime = UnmergeDepPriority(buildtime=True)
14340                 priority_map = {
14341                         "RDEPEND": runtime,
14342                         "PDEPEND": runtime_post,
14343                         "DEPEND": buildtime,
14344                 }
14345
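                      # For each package slated for removal, add edges to any of its DEPEND,
                      # RDEPEND and PDEPEND matches that are also being removed, so that the
                      # topological sort below unmerges consumers before their dependencies.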
14346                 for node in clean_set:
14347                         graph.add(node, None)
14348                         mydeps = []
14349                         node_use = node.metadata["USE"].split()
14350                         for dep_type in dep_keys:
14351                                 depstr = node.metadata[dep_type]
14352                                 if not depstr:
14353                                         continue
14354                                 try:
14355                                         portage.dep._dep_check_strict = False
14356                                         success, atoms = portage.dep_check(depstr, None, settings,
14357                                                 myuse=node_use, trees=resolver._graph_trees,
14358                                                 myroot=myroot)
14359                                 finally:
14360                                         portage.dep._dep_check_strict = True
14361                                 if not success:
14362                                         # Ignore invalid deps of packages that will
14363                                         # be uninstalled anyway.
14364                                         continue
14365
14366                                 priority = priority_map[dep_type]
14367                                 for atom in atoms:
14368                                         if not isinstance(atom, portage.dep.Atom):
14369                                                 # Ignore invalid atoms returned from dep_check().
14370                                                 continue
14371                                         if atom.blocker:
14372                                                 continue
14373                                         matches = vardb.match_pkgs(atom)
14374                                         if not matches:
14375                                                 continue
14376                                         for child_node in matches:
14377                                                 if child_node in clean_set:
14378                                                         graph.add(child_node, node, priority=priority)
14379
14380                 ordered = True
14381                 if len(graph.order) == len(graph.root_nodes()):
14382                         # If there are no dependencies between packages
14383                         # let unmerge() group them by cat/pn.
14384                         ordered = False
14385                         cleanlist = [pkg.cpv for pkg in graph.order]
14386                 else:
14387                         # Order nodes from lowest to highest overall reference count for
14388                         # optimal root node selection.
14389                         node_refcounts = {}
14390                         for node in graph.order:
14391                                 node_refcounts[node] = len(graph.parent_nodes(node))
14392                         def cmp_reference_count(node1, node2):
14393                                 return node_refcounts[node1] - node_refcounts[node2]
14394                         graph.order.sort(key=cmp_sort_key(cmp_reference_count))
14395
14396                         ignore_priority_range = [None]
14397                         ignore_priority_range.extend(
14398                                 xrange(UnmergeDepPriority.MIN, UnmergeDepPriority.MAX + 1))
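                              # Repeatedly pop root nodes (packages that nothing else in the
                              # clean set still depends on), relaxing the ignored dependency
                              # priority only as far as needed to break circular dependencies.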
14399                         while not graph.empty():
14400                                 for ignore_priority in ignore_priority_range:
14401                                         nodes = graph.root_nodes(ignore_priority=ignore_priority)
14402                                         if nodes:
14403                                                 break
14404                                 if not nodes:
14405                                         raise AssertionError("no root nodes")
14406                                 if ignore_priority is not None:
14407                                         # Some deps have been dropped due to circular dependencies,
14408                                         # so only pop one node in order to minimize the number that
14409                                         # are dropped.
14410                                         del nodes[1:]
14411                                 for node in nodes:
14412                                         graph.remove(node)
14413                                         cleanlist.append(node.cpv)
14414
14415                 unmerge(root_config, myopts, "unmerge", cleanlist,
14416                         ldpath_mtimes, ordered=ordered)
14417
14418         if action == "prune":
14419                 return
14420
14421         if not cleanlist and "--quiet" in myopts:
14422                 return
14423
14424         print "Packages installed:   "+str(len(vardb.cpv_all()))
14425         print "Packages in world:    " + \
14426                 str(len(root_config.sets["world"].getAtoms()))
14427         print "Packages in system:   " + \
14428                 str(len(root_config.sets["system"].getAtoms()))
14429         print "Required packages:    "+str(required_pkgs_total)
14430         if "--pretend" in myopts:
14431                 print "Number to remove:     "+str(len(cleanlist))
14432         else:
14433                 print "Number removed:       "+str(len(cleanlist))
14434
14435 def resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
14436         """
14437         Construct a depgraph for the given resume list. This will raise
14438         PackageNotFound or depgraph.UnsatisfiedResumeDep when necessary.
14439         @rtype: tuple
14440         @returns: (success, depgraph, dropped_tasks)
14441         """
14442         skip_masked = True
14443         skip_unsatisfied = True
14444         mergelist = mtimedb["resume"]["mergelist"]
14445         dropped_tasks = set()
14446         while True:
14447                 mydepgraph = depgraph(settings, trees,
14448                         myopts, myparams, spinner)
14449                 try:
14450                         success = mydepgraph.loadResumeCommand(mtimedb["resume"],
14451                                 skip_masked=skip_masked)
14452                 except depgraph.UnsatisfiedResumeDep, e:
14453                         if not skip_unsatisfied:
14454                                 raise
14455
14456                         graph = mydepgraph.digraph
14457                         unsatisfied_parents = dict((dep.parent, dep.parent) \
14458                                 for dep in e.value)
14459                         traversed_nodes = set()
14460                         unsatisfied_stack = list(unsatisfied_parents)
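                              # Walk up the dependency graph from each unsatisfied parent,
                              # collecting any ancestor whose own dependencies would become
                              # unsatisfied if these packages were dropped from the resume list.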
14461                         while unsatisfied_stack:
14462                                 pkg = unsatisfied_stack.pop()
14463                                 if pkg in traversed_nodes:
14464                                         continue
14465                                 traversed_nodes.add(pkg)
14466
14467                                 # If this package was pulled in by a parent
14468                                 # package scheduled for merge, removing this
14469                                 # package may cause the parent package's
14470                                 # dependency to become unsatisfied.
14471                                 for parent_node in graph.parent_nodes(pkg):
14472                                         if not isinstance(parent_node, Package) \
14473                                                 or parent_node.operation not in ("merge", "nomerge"):
14474                                                 continue
14475                                         unsatisfied = \
14476                                                 graph.child_nodes(parent_node,
14477                                                 ignore_priority=DepPrioritySatisfiedRange.ignore_soft)
14478                                         if pkg in unsatisfied:
14479                                                 unsatisfied_parents[parent_node] = parent_node
14480                                                 unsatisfied_stack.append(parent_node)
14481
14482                         pruned_mergelist = []
14483                         for x in mergelist:
14484                                 if isinstance(x, list) and \
14485                                         tuple(x) not in unsatisfied_parents:
14486                                         pruned_mergelist.append(x)
14487
14488                         # If the mergelist doesn't shrink then this loop is infinite.
14489                         if len(pruned_mergelist) == len(mergelist):
14490                                 # This happens if a package can't be dropped because
14491                                 # it's already installed, but it has unsatisfied PDEPEND.
14492                                 raise
14493                         mergelist[:] = pruned_mergelist
14494
14495                         # Exclude installed packages that have been removed from the graph due
14496                         # to failure to build/install runtime dependencies after the dependent
14497                         # package has already been installed.
14498                         dropped_tasks.update(pkg for pkg in \
14499                                 unsatisfied_parents if pkg.operation != "nomerge")
14500                         mydepgraph.break_refs(unsatisfied_parents)
14501
14502                         del e, graph, traversed_nodes, \
14503                                 unsatisfied_parents, unsatisfied_stack
14504                         continue
14505                 else:
14506                         break
14507         return (success, mydepgraph, dropped_tasks)
14508
14509 def action_build(settings, trees, mtimedb,
14510         myopts, myaction, myfiles, spinner):
14511
14512         # validate the state of the resume data
14513         # so that we can make assumptions later.
14514         for k in ("resume", "resume_backup"):
14515                 if k not in mtimedb:
14516                         continue
14517                 resume_data = mtimedb[k]
14518                 if not isinstance(resume_data, dict):
14519                         del mtimedb[k]
14520                         continue
14521                 mergelist = resume_data.get("mergelist")
14522                 if not isinstance(mergelist, list):
14523                         del mtimedb[k]
14524                         continue
14525                 for x in mergelist:
14526                         if not (isinstance(x, list) and len(x) == 4):
14527                                 continue
14528                         pkg_type, pkg_root, pkg_key, pkg_action = x
14529                         if pkg_root not in trees:
14530                                 # Current $ROOT setting differs,
14531                                 # so the list must be stale.
14532                                 mergelist = None
14533                                 break
14534                 if not mergelist:
14535                         del mtimedb[k]
14536                         continue
14537                 resume_opts = resume_data.get("myopts")
14538                 if not isinstance(resume_opts, (dict, list)):
14539                         del mtimedb[k]
14540                         continue
14541                 favorites = resume_data.get("favorites")
14542                 if not isinstance(favorites, list):
14543                         del mtimedb[k]
14544                         continue
14545
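              # Detect a resumable merge: restore the saved resume data (promoting
              # the backup copy if necessary), merge its saved options with the
              # current command line (current options win), and re-apply the result
              # to each root's configuration.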
14546         resume = False
14547         if "--resume" in myopts and \
14548                 ("resume" in mtimedb or
14549                 "resume_backup" in mtimedb):
14550                 resume = True
14551                 if "resume" not in mtimedb:
14552                         mtimedb["resume"] = mtimedb["resume_backup"]
14553                         del mtimedb["resume_backup"]
14554                         mtimedb.commit()
14555                 # "myopts" is a list for backward compatibility.
14556                 resume_opts = mtimedb["resume"].get("myopts", [])
14557                 if isinstance(resume_opts, list):
14558                         resume_opts = dict((k,True) for k in resume_opts)
14559                 for opt in ("--ask", "--color", "--skipfirst", "--tree"):
14560                         resume_opts.pop(opt, None)
14561
14562                 # Current options always override resume_opts.
14563                 resume_opts.update(myopts)
14564                 myopts.clear()
14565                 myopts.update(resume_opts)
14566
14567                 if "--debug" in myopts:
14568                         writemsg_level("myopts %s\n" % (myopts,))
14569
14570                 # Adjust config according to options of the command being resumed.
14571                 for myroot in trees:
14572                         mysettings =  trees[myroot]["vartree"].settings
14573                         mysettings.unlock()
14574                         adjust_config(myopts, mysettings)
14575                         mysettings.lock()
14576                         del myroot, mysettings
14577
14578         ldpath_mtimes = mtimedb["ldpath"]
14579         favorites=[]
14580         merge_count = 0
14581         buildpkgonly = "--buildpkgonly" in myopts
14582         pretend = "--pretend" in myopts
14583         fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
14584         ask = "--ask" in myopts
14585         nodeps = "--nodeps" in myopts
14586         oneshot = "--oneshot" in myopts or "--onlydeps" in myopts
14587         tree = "--tree" in myopts
14588         if nodeps and tree:
14589                 tree = False
14590                 del myopts["--tree"]
14591                 portage.writemsg(colorize("WARN", " * ") + \
14592                         "--tree is broken with --nodeps. Disabling...\n")
14593         debug = "--debug" in myopts
14594         verbose = "--verbose" in myopts
14595         quiet = "--quiet" in myopts
14596         if pretend or fetchonly:
14597                 # make the mtimedb readonly
14598                 mtimedb.filename = None
14599         if '--digest' in myopts or 'digest' in settings.features:
14600                 if '--digest' in myopts:
14601                         msg = "The --digest option"
14602                 else:
14603                         msg = "The FEATURES=digest setting"
14604
14605                 msg += " can prevent corruption from being" + \
14606                         " noticed. The `repoman manifest` command is the preferred" + \
14607                         " way to generate manifests and it is capable of doing an" + \
14608                         " entire repository or category at once."
14609                 prefix = bad(" * ")
14610                 writemsg(prefix + "\n")
14611                 from textwrap import wrap
14612                 for line in wrap(msg, 72):
14613                         writemsg("%s%s\n" % (prefix, line))
14614                 writemsg(prefix + "\n")
14615
14616         if "--quiet" not in myopts and \
14617                 ("--pretend" in myopts or "--ask" in myopts or \
14618                 "--tree" in myopts or "--verbose" in myopts):
14619                 action = ""
14620                 if "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
14621                         action = "fetched"
14622                 elif "--buildpkgonly" in myopts:
14623                         action = "built"
14624                 else:
14625                         action = "merged"
14626                 if "--tree" in myopts and action != "fetched": # Tree doesn't work with fetching
14627                         print
14628                         print darkgreen("These are the packages that would be %s, in reverse order:") % action
14629                         print
14630                 else:
14631                         print
14632                         print darkgreen("These are the packages that would be %s, in order:") % action
14633                         print
14634
14635         show_spinner = "--quiet" not in myopts and "--nodeps" not in myopts
14636         if not show_spinner:
14637                 spinner.update = spinner.update_quiet
14638
14639         if resume:
14640                 favorites = mtimedb["resume"].get("favorites")
14641                 if not isinstance(favorites, list):
14642                         favorites = []
14643
14644                 if show_spinner:
14645                         print "Calculating dependencies  ",
14646                 myparams = create_depgraph_params(myopts, myaction)
14647
14648                 resume_data = mtimedb["resume"]
14649                 mergelist = resume_data["mergelist"]
14650                 if mergelist and "--skipfirst" in myopts:
14651                         for i, task in enumerate(mergelist):
14652                                 if isinstance(task, list) and \
14653                                         task and task[-1] == "merge":
14654                                         del mergelist[i]
14655                                         break
14656
14657                 success = False
14658                 mydepgraph = None
14659                 try:
14660                         success, mydepgraph, dropped_tasks = resume_depgraph(
14661                                 settings, trees, mtimedb, myopts, myparams, spinner)
14662                 except (portage.exception.PackageNotFound,
14663                         depgraph.UnsatisfiedResumeDep), e:
14664                         if isinstance(e, depgraph.UnsatisfiedResumeDep):
14665                                 mydepgraph = e.depgraph
14666                         if show_spinner:
14667                                 print
14668                         from textwrap import wrap
14669                         from portage.output import EOutput
14670                         out = EOutput()
14671
14672                         resume_data = mtimedb["resume"]
14673                         mergelist = resume_data.get("mergelist")
14674                         if not isinstance(mergelist, list):
14675                                 mergelist = []
14676                         if mergelist and (debug or (verbose and not quiet)):
14677                                 out.eerror("Invalid resume list:")
14678                                 out.eerror("")
14679                                 indent = "  "
14680                                 for task in mergelist:
14681                                         if isinstance(task, list):
14682                                                 out.eerror(indent + str(tuple(task)))
14683                                 out.eerror("")
14684
14685                         if isinstance(e, depgraph.UnsatisfiedResumeDep):
14686                                 out.eerror("One or more packages are either masked or " + \
14687                                         "have missing dependencies:")
14688                                 out.eerror("")
14689                                 indent = "  "
14690                                 for dep in e.value:
14691                                         if dep.atom is None:
14692                                                 out.eerror(indent + "Masked package:")
14693                                                 out.eerror(2 * indent + str(dep.parent))
14694                                                 out.eerror("")
14695                                         else:
14696                                                 out.eerror(indent + str(dep.atom) + " pulled in by:")
14697                                                 out.eerror(2 * indent + str(dep.parent))
14698                                                 out.eerror("")
14699                                 msg = "The resume list contains packages " + \
14700                                         "that are either masked or have " + \
14701                                         "unsatisfied dependencies. " + \
14702                                         "Please restart/continue " + \
14703                                         "the operation manually, or use --skipfirst " + \
14704                                         "to skip the first package in the list and " + \
14705                                         "any other packages that may be " + \
14706                                         "masked or have missing dependencies."
14707                                 for line in wrap(msg, 72):
14708                                         out.eerror(line)
14709                         elif isinstance(e, portage.exception.PackageNotFound):
14710                                 out.eerror("An expected package is " + \
14711                                         "not available: %s" % str(e))
14712                                 out.eerror("")
14713                                 msg = "The resume list contains one or more " + \
14714                                         "packages that are no longer " + \
14715                                         "available. Please restart/continue " + \
14716                                         "the operation manually."
14717                                 for line in wrap(msg, 72):
14718                                         out.eerror(line)
14719                 else:
14720                         if show_spinner:
14721                                 print "\b\b... done!"
14722
14723                 if success:
14724                         if dropped_tasks:
14725                                 portage.writemsg("!!! One or more packages have been " + \
14726                                         "dropped due to\n" + \
14727                                         "!!! masking or unsatisfied dependencies:\n\n",
14728                                         noiselevel=-1)
14729                                 for task in dropped_tasks:
14730                                         portage.writemsg("  " + str(task) + "\n", noiselevel=-1)
14731                                 portage.writemsg("\n", noiselevel=-1)
14732                         del dropped_tasks
14733                 else:
14734                         if mydepgraph is not None:
14735                                 mydepgraph.display_problems()
14736                         if not (ask or pretend):
14737                                 # delete the current list and also the backup
14738                                 # since it's probably stale too.
14739                                 for k in ("resume", "resume_backup"):
14740                                         mtimedb.pop(k, None)
14741                                 mtimedb.commit()
14742
14743                         return 1
14744         else:
14745                 if ("--resume" in myopts):
14746                         print darkgreen("emerge: It seems we have nothing to resume...")
14747                         return os.EX_OK
14748
14749                 myparams = create_depgraph_params(myopts, myaction)
14750                 if "--quiet" not in myopts and "--nodeps" not in myopts:
14751                         print "Calculating dependencies  ",
14752                         sys.stdout.flush()
14753                 mydepgraph = depgraph(settings, trees, myopts, myparams, spinner)
14754                 try:
14755                         retval, favorites = mydepgraph.select_files(myfiles)
14756                 except portage.exception.PackageNotFound, e:
14757                         portage.writemsg("\n!!! %s\n" % str(e), noiselevel=-1)
14758                         return 1
14759                 except portage.exception.PackageSetNotFound, e:
14760                         root_config = trees[settings["ROOT"]]["root_config"]
14761                         display_missing_pkg_set(root_config, e.value)
14762                         return 1
14763                 if show_spinner:
14764                         print "\b\b... done!"
14765                 if not retval:
14766                         mydepgraph.display_problems()
14767                         return 1
14768
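              # Interactive preview: with --ask, --tree or --verbose (and without
              # --pretend), display the pending merge list and, for --ask, prompt
              # the user before continuing.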
14769         if "--pretend" not in myopts and \
14770                 ("--ask" in myopts or "--tree" in myopts or \
14771                 "--verbose" in myopts) and \
14772                 not ("--quiet" in myopts and "--ask" not in myopts):
14773                 if "--resume" in myopts:
14774                         mymergelist = mydepgraph.altlist()
14775                         if len(mymergelist) == 0:
14776                                 print colorize("INFORM", "emerge: It seems we have nothing to resume...")
14777                                 return os.EX_OK
14778                         favorites = mtimedb["resume"]["favorites"]
14779                         retval = mydepgraph.display(
14780                                 mydepgraph.altlist(reversed=tree),
14781                                 favorites=favorites)
14782                         mydepgraph.display_problems()
14783                         if retval != os.EX_OK:
14784                                 return retval
14785                         prompt="Would you like to resume merging these packages?"
14786                 else:
14787                         retval = mydepgraph.display(
14788                                 mydepgraph.altlist(reversed=("--tree" in myopts)),
14789                                 favorites=favorites)
14790                         mydepgraph.display_problems()
14791                         if retval != os.EX_OK:
14792                                 return retval
14793                         mergecount=0
14794                         for x in mydepgraph.altlist():
14795                                 if isinstance(x, Package) and x.operation == "merge":
14796                                         mergecount += 1
14797
14798                         if mergecount==0:
14799                                 sets = trees[settings["ROOT"]]["root_config"].sets
14800                                 world_candidates = None
14801                                 if "--noreplace" in myopts and \
14802                                         not oneshot and favorites:
14803                                         # Sets that are not world candidates are filtered
14804                                         # out here since the favorites list needs to be
14805                                         # complete for depgraph.loadResumeCommand() to
14806                                         # operate correctly.
14807                                         world_candidates = [x for x in favorites \
14808                                                 if not (x.startswith(SETPREFIX) and \
14809                                                 not sets[x[1:]].world_candidate)]
14810                                 if "--noreplace" in myopts and \
14811                                         not oneshot and world_candidates:
14812                                         print
14813                                         for x in world_candidates:
14814                                                 print " %s %s" % (good("*"), x)
14815                                         prompt="Would you like to add these packages to your world favorites?"
14816                                 elif settings["AUTOCLEAN"] and "yes"==settings["AUTOCLEAN"]:
14817                                         prompt="Nothing to merge; would you like to auto-clean packages?"
14818                                 else:
14819                                         print
14820                                         print "Nothing to merge; quitting."
14821                                         print
14822                                         return os.EX_OK
14823                         elif "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
14824                                 prompt="Would you like to fetch the source files for these packages?"
14825                         else:
14826                                 prompt="Would you like to merge these packages?"
14827                 print
14828                 if "--ask" in myopts and userquery(prompt) == "No":
14829                         print
14830                         print "Quitting."
14831                         print
14832                         return os.EX_OK
14833                 # Don't ask again (e.g. when auto-cleaning packages after merge)
14834                 myopts.pop("--ask", None)
14835
14836         if ("--pretend" in myopts) and not ("--fetchonly" in myopts or "--fetch-all-uri" in myopts):
14837                 if ("--resume" in myopts):
14838                         mymergelist = mydepgraph.altlist()
14839                         if len(mymergelist) == 0:
14840                                 print colorize("INFORM", "emerge: It seems we have nothing to resume...")
14841                                 return os.EX_OK
14842                         favorites = mtimedb["resume"]["favorites"]
14843                         retval = mydepgraph.display(
14844                                 mydepgraph.altlist(reversed=tree),
14845                                 favorites=favorites)
14846                         mydepgraph.display_problems()
14847                         if retval != os.EX_OK:
14848                                 return retval
14849                 else:
14850                         retval = mydepgraph.display(
14851                                 mydepgraph.altlist(reversed=("--tree" in myopts)),
14852                                 favorites=favorites)
14853                         mydepgraph.display_problems()
14854                         if retval != os.EX_OK:
14855                                 return retval
14856                         if "--buildpkgonly" in myopts:
14857                                 graph_copy = mydepgraph.digraph.clone()
14858                                 removed_nodes = set()
14859                                 for node in graph_copy:
14860                                         if not isinstance(node, Package) or \
14861                                                 node.operation == "nomerge":
14862                                                 removed_nodes.add(node)
14863                                 graph_copy.difference_update(removed_nodes)
14864                                 if not graph_copy.hasallzeros(ignore_priority = \
14865                                         DepPrioritySatisfiedRange.ignore_medium):
14866                                         print "\n!!! --buildpkgonly requires all dependencies to be merged."
14867                                         print "!!! You have to merge the dependencies before you can build this package.\n"
14868                                         return 1
14869         else:
14870                 if "--buildpkgonly" in myopts:
14871                         graph_copy = mydepgraph.digraph.clone()
14872                         removed_nodes = set()
14873                         for node in graph_copy:
14874                                 if not isinstance(node, Package) or \
14875                                         node.operation == "nomerge":
14876                                         removed_nodes.add(node)
14877                         graph_copy.difference_update(removed_nodes)
14878                         if not graph_copy.hasallzeros(ignore_priority = \
14879                                 DepPrioritySatisfiedRange.ignore_medium):
14880                                 print "\n!!! --buildpkgonly requires all dependencies to be merged."
14881                                 print "!!! Cannot merge requested packages. Merge deps and try again.\n"
14882                                 return 1
14883
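                      # Build the merge list (from the saved resume data or the freshly
                      # calculated depgraph), break internal references so the depgraph
                      # can be garbage collected, and hand the list to the Scheduler.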
14884                 if ("--resume" in myopts):
14885                         favorites=mtimedb["resume"]["favorites"]
14886                         mymergelist = mydepgraph.altlist()
14887                         mydepgraph.break_refs(mymergelist)
14888                         mergetask = Scheduler(settings, trees, mtimedb, myopts,
14889                                 spinner, mymergelist, favorites, mydepgraph.schedulerGraph())
14890                         del mydepgraph, mymergelist
14891                         clear_caches(trees)
14892
14893                         retval = mergetask.merge()
14894                         merge_count = mergetask.curval
14895                 else:
14896                         if "resume" in mtimedb and \
14897                         "mergelist" in mtimedb["resume"] and \
14898                         len(mtimedb["resume"]["mergelist"]) > 1:
14899                                 mtimedb["resume_backup"] = mtimedb["resume"]
14900                                 del mtimedb["resume"]
14901                                 mtimedb.commit()
14902                         mtimedb["resume"]={}
14903                         # Stored as a dict starting with portage-2.1.6_rc1, and supported
14904                         # by >=portage-2.1.3_rc8. Versions <portage-2.1.3_rc8 only support
14905                         # a list type for options.
14906                         mtimedb["resume"]["myopts"] = myopts.copy()
14907
14908                         # Convert Atom instances to plain str.
14909                         mtimedb["resume"]["favorites"] = [str(x) for x in favorites]
14910
14911                         pkglist = mydepgraph.altlist()
14912                         mydepgraph.saveNomergeFavorites()
14913                         mydepgraph.break_refs(pkglist)
14914                         mergetask = Scheduler(settings, trees, mtimedb, myopts,
14915                                 spinner, pkglist, favorites, mydepgraph.schedulerGraph())
14916                         del mydepgraph, pkglist
14917                         clear_caches(trees)
14918
14919                         retval = mergetask.merge()
14920                         merge_count = mergetask.curval
14921
14922                 if retval == os.EX_OK and not (buildpkgonly or fetchonly or pretend):
14923                         if "yes" == settings.get("AUTOCLEAN"):
14924                                 portage.writemsg_stdout(">>> Auto-cleaning packages...\n")
14925                                 unmerge(trees[settings["ROOT"]]["root_config"],
14926                                         myopts, "clean", [],
14927                                         ldpath_mtimes, autoclean=1)
14928                         else:
14929                                 portage.writemsg_stdout(colorize("WARN", "WARNING:")
14930                                         + " AUTOCLEAN is disabled.  This can cause serious"
14931                                         + " problems due to overlapping packages.\n")
14932                         trees[settings["ROOT"]]["vartree"].dbapi.plib_registry.pruneNonExisting()
14933
14934                 return retval
14935
14936 def multiple_actions(action1, action2):
14937         sys.stderr.write("\n!!! Multiple actions requested... Please choose one only.\n")
14938         sys.stderr.write("!!! '%s' or '%s'\n\n" % (action1, action2))
14939         sys.exit(1)
14940
14941 def insert_optional_args(args):
14942         """
14943         Parse optional arguments and insert a value if one has
14944         not been provided. This is done before feeding the args
14945         to the optparse parser since that parser does not support
14946         this feature natively.
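
              For example, based on the handling below:
                      ["-j"]          -> ["--jobs", "True"]
                      ["-j4"]         -> ["--jobs", "4"]
                      ["--root-deps"] -> ["--root-deps", "True"]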
14947         """
14948
14949         new_args = []
14950         jobs_opts = ("-j", "--jobs")
14951         root_deps_opt = '--root-deps'
14952         root_deps_choices = ('True', 'rdeps')
14953         arg_stack = args[:]
14954         arg_stack.reverse()
14955         while arg_stack:
14956                 arg = arg_stack.pop()
14957
14958                 if arg == root_deps_opt:
14959                         new_args.append(arg)
14960                         if arg_stack and arg_stack[-1] in root_deps_choices:
14961                                 new_args.append(arg_stack.pop())
14962                         else:
14963                                 # insert default argument
14964                                 new_args.append('True')
14965                         continue
14966
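                      # Handle -j/--jobs. The job count may be fused to the short option
                      # (e.g. "-j4"), combined with other short options (e.g. "-vj"),
                      # supplied as the following argument, or omitted entirely.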
14967                 short_job_opt = bool("j" in arg and arg[:1] == "-" and arg[:2] != "--")
14968                 if not (short_job_opt or arg in jobs_opts):
14969                         new_args.append(arg)
14970                         continue
14971
14972                 # Insert a placeholder value so that optparse always receives an
14973                 # argument for --jobs, even when no count was supplied.
14974
14975                 new_args.append("--jobs")
14976                 job_count = None
14977                 saved_opts = None
14978                 if short_job_opt and len(arg) > 2:
14979                         if arg[:2] == "-j":
14980                                 try:
14981                                         job_count = int(arg[2:])
14982                                 except ValueError:
14983                                         saved_opts = arg[2:]
14984                         else:
14985                                 job_count = "True"
14986                                 saved_opts = arg[1:].replace("j", "")
14987
14988                 if job_count is None and arg_stack:
14989                         try:
14990                                 job_count = int(arg_stack[-1])
14991                         except ValueError:
14992                                 pass
14993                         else:
14994                                 # Discard the job count from the stack
14995                                 # since we're consuming it here.
14996                                 arg_stack.pop()
14997
14998                 if job_count is None:
14999                         # unlimited number of jobs
15000                         new_args.append("True")
15001                 else:
15002                         new_args.append(str(job_count))
15003
15004                 if saved_opts is not None:
15005                         new_args.append("-" + saved_opts)
15006
15007         return new_args
15008
15009 def parse_opts(tmpcmdline, silent=False):
15010         myaction=None
15011         myopts = {}
15012         myfiles=[]
15013
15014         global actions, options, shortmapping
15015
15016         longopt_aliases = {"--cols":"--columns", "--skip-first":"--skipfirst"}
15017         argument_options = {
15018                 "--config-root": {
15019                         "help":"specify the location for portage configuration files",
15020                         "action":"store"
15021                 },
15022                 "--color": {
15023                         "help":"enable or disable color output",
15024                         "type":"choice",
15025                         "choices":("y", "n")
15026                 },
15027
15028                 "--jobs": {
15029
15030                         "help"   : "Specifies the number of packages to build " + \
15031                                 "simultaneously.",
15032
15033                         "action" : "store"
15034                 },
15035
15036                 "--load-average": {
15037
15038                         "help"   :"Specifies that no new builds should be started " + \
15039                                 "if there are other builds running and the load average " + \
15040                                 "is at least LOAD (a floating-point number).",
15041
15042                         "action" : "store"
15043                 },
15044
15045                 "--with-bdeps": {
15046                         "help":"include unnecessary build time dependencies",
15047                         "type":"choice",
15048                         "choices":("y", "n")
15049                 },
15050                 "--reinstall": {
15051                         "help":"specify conditions to trigger package reinstallation",
15052                         "type":"choice",
15053                         "choices":["changed-use"]
15054                 },
15055                 "--root": {
15056                  "help"   : "specify the target root filesystem for merging packages",
15057                  "action" : "store"
15058                 },
15059
15060                 "--root-deps": {
15061                         "help"    : "modify interpretation of dependencies",
15062                         "type"    : "choice",
15063                         "choices" :("True", "rdeps")
15064                 },
15065         }
15066
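              # Build an optparse parser from the module-level action, option and
              # short option tables, plus the argument_options defined above.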
15067         from optparse import OptionParser
15068         parser = OptionParser()
15069         if parser.has_option("--help"):
15070                 parser.remove_option("--help")
15071
15072         for action_opt in actions:
15073                 parser.add_option("--" + action_opt, action="store_true",
15074                         dest=action_opt.replace("-", "_"), default=False)
15075         for myopt in options:
15076                 parser.add_option(myopt, action="store_true",
15077                         dest=myopt.lstrip("--").replace("-", "_"), default=False)
15078         for shortopt, longopt in shortmapping.iteritems():
15079                 parser.add_option("-" + shortopt, action="store_true",
15080                         dest=longopt.lstrip("--").replace("-", "_"), default=False)
15081         for myalias, myopt in longopt_aliases.iteritems():
15082                 parser.add_option(myalias, action="store_true",
15083                         dest=myopt.lstrip("--").replace("-", "_"), default=False)
15084
15085         for myopt, kwargs in argument_options.iteritems():
15086                 parser.add_option(myopt,
15087                         dest=myopt.lstrip("--").replace("-", "_"), **kwargs)
15088
15089         tmpcmdline = insert_optional_args(tmpcmdline)
15090
15091         myoptions, myargs = parser.parse_args(args=tmpcmdline)
15092
15093         if myoptions.root_deps == "True":
15094                 myoptions.root_deps = True
15095
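              # Normalize --jobs: the placeholder "True" means no explicit job
              # limit; anything else must parse as a positive integer or it is
              # discarded with a warning.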
15096         if myoptions.jobs:
15097                 jobs = None
15098                 if myoptions.jobs == "True":
15099                         jobs = True
15100                 else:
15101                         try:
15102                                 jobs = int(myoptions.jobs)
15103                         except ValueError:
15104                                 jobs = -1
15105
15106                 if jobs is not True and \
15107                         jobs < 1:
15108                         jobs = None
15109                         if not silent:
15110                                 writemsg("!!! Invalid --jobs parameter: '%s'\n" % \
15111                                         (myoptions.jobs,), noiselevel=-1)
15112
15113                 myoptions.jobs = jobs
15114
15115         if myoptions.load_average:
15116                 try:
15117                         load_average = float(myoptions.load_average)
15118                 except ValueError:
15119                         load_average = 0.0
15120
15121                 if load_average <= 0.0:
15122                         load_average = None
15123                         if not silent:
15124                                 writemsg("!!! Invalid --load-average parameter: '%s'\n" % \
15125                                         (myoptions.load_average,), noiselevel=-1)
15126
15127                 myoptions.load_average = load_average
15128
15129         for myopt in options:
15130                 v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"))
15131                 if v:
15132                         myopts[myopt] = True
15133
15134         for myopt in argument_options:
15135                 v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"), None)
15136                 if v is not None:
15137                         myopts[myopt] = v
15138
15139         if myoptions.searchdesc:
15140                 myoptions.search = True
15141
15142         for action_opt in actions:
15143                 v = getattr(myoptions, action_opt.replace("-", "_"))
15144                 if v:
15145                         if myaction:
15146                                 multiple_actions(myaction, action_opt)
15147                                 sys.exit(1)
15148                         myaction = action_opt
15149
15150         myfiles += myargs
15151
15152         return myaction, myopts, myfiles
15153
15154 def validate_ebuild_environment(trees):
15155         for myroot in trees:
15156                 settings = trees[myroot]["vartree"].settings
15157                 settings.validate()
15158
15159 def clear_caches(trees):
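              # Drop cached metadata held by the porttree, bintree and vartree
              # dbapis, then force a garbage collection pass so the memory is
              # actually released.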
15160         for d in trees.itervalues():
15161                 d["porttree"].dbapi.melt()
15162                 d["porttree"].dbapi._aux_cache.clear()
15163                 d["bintree"].dbapi._aux_cache.clear()
15164                 d["bintree"].dbapi._clear_cache()
15165                 d["vartree"].dbapi.linkmap._clear_cache()
15166         portage.dircache.clear()
15167         gc.collect()
15168
15169 def load_emerge_config(trees=None):
15170         kwargs = {}
15171         for k, envvar in (("config_root", "PORTAGE_CONFIGROOT"), ("target_root", "ROOT")):
15172                 v = os.environ.get(envvar, None)
15173                 if v and v.strip():
15174                         kwargs[k] = v
15175         trees = portage.create_trees(trees=trees, **kwargs)
15176
15177         for root, root_trees in trees.iteritems():
15178                 settings = root_trees["vartree"].settings
15179                 setconfig = load_default_config(settings, root_trees)
15180                 root_trees["root_config"] = RootConfig(settings, root_trees, setconfig)
15181
15182         settings = trees["/"]["vartree"].settings
15183
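              # If a ROOT other than / is configured, prefer that root's settings.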
15184         for myroot in trees:
15185                 if myroot != "/":
15186                         settings = trees[myroot]["vartree"].settings
15187                         break
15188
15189         mtimedbfile = os.path.join("/", portage.CACHE_PATH.lstrip(os.path.sep), "mtimedb")
15190         mtimedb = portage.MtimeDB(mtimedbfile)
15191
15192         return settings, trees, mtimedb
15193
15194 def adjust_config(myopts, settings):
15195         """Make emerge specific adjustments to the config."""
15196
15197         # To enhance usability, make some vars case insensitive by forcing them to
15198         # lower case.
15199         for myvar in ("AUTOCLEAN", "NOCOLOR"):
15200                 if myvar in settings:
15201                         settings[myvar] = settings[myvar].lower()
15202                         settings.backup_changes(myvar)
15203         del myvar
15204
15205         # Kill noauto as it will break merges otherwise.
15206         if "noauto" in settings.features:
15207                 settings.features.remove('noauto')
15208                 settings['FEATURES'] = ' '.join(sorted(settings.features))
15209                 settings.backup_changes("FEATURES")
15210
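              # Sanitize the delay settings, falling back to the defaults below
              # when the configured values do not parse as integers.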
15211         CLEAN_DELAY = 5
15212         try:
15213                 CLEAN_DELAY = int(settings.get("CLEAN_DELAY", str(CLEAN_DELAY)))
15214         except ValueError, e:
15215                 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
15216                 portage.writemsg("!!! Unable to parse integer: CLEAN_DELAY='%s'\n" % \
15217                         settings["CLEAN_DELAY"], noiselevel=-1)
15218         settings["CLEAN_DELAY"] = str(CLEAN_DELAY)
15219         settings.backup_changes("CLEAN_DELAY")
15220
15221         EMERGE_WARNING_DELAY = 10
15222         try:
15223                 EMERGE_WARNING_DELAY = int(settings.get(
15224                         "EMERGE_WARNING_DELAY", str(EMERGE_WARNING_DELAY)))
15225         except ValueError, e:
15226                 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
15227                 portage.writemsg("!!! Unable to parse integer: EMERGE_WARNING_DELAY='%s'\n" % \
15228                         settings["EMERGE_WARNING_DELAY"], noiselevel=-1)
15229         settings["EMERGE_WARNING_DELAY"] = str(EMERGE_WARNING_DELAY)
15230         settings.backup_changes("EMERGE_WARNING_DELAY")
15231
15232         if "--quiet" in myopts:
15233                 settings["PORTAGE_QUIET"]="1"
15234                 settings.backup_changes("PORTAGE_QUIET")
15235
15236         if "--verbose" in myopts:
15237                 settings["PORTAGE_VERBOSE"] = "1"
15238                 settings.backup_changes("PORTAGE_VERBOSE")
15239
15240         # Set so that configs will be merged regardless of remembered status
15241         if ("--noconfmem" in myopts):
15242                 settings["NOCONFMEM"]="1"
15243                 settings.backup_changes("NOCONFMEM")
15244
15245         # Set various debug markers... They should be merged somehow.
15246         PORTAGE_DEBUG = 0
15247         try:
15248                 PORTAGE_DEBUG = int(settings.get("PORTAGE_DEBUG", str(PORTAGE_DEBUG)))
15249                 if PORTAGE_DEBUG not in (0, 1):
15250                         portage.writemsg("!!! Invalid value: PORTAGE_DEBUG='%i'\n" % \
15251                                 PORTAGE_DEBUG, noiselevel=-1)
15252                         portage.writemsg("!!! PORTAGE_DEBUG must be either 0 or 1\n",
15253                                 noiselevel=-1)
15254                         PORTAGE_DEBUG = 0
15255         except ValueError, e:
15256                 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
15257                 portage.writemsg("!!! Unable to parse integer: PORTAGE_DEBUG='%s'\n" %\
15258                         settings["PORTAGE_DEBUG"], noiselevel=-1)
15259                 del e
15260         if "--debug" in myopts:
15261                 PORTAGE_DEBUG = 1
15262         settings["PORTAGE_DEBUG"] = str(PORTAGE_DEBUG)
15263         settings.backup_changes("PORTAGE_DEBUG")
15264
15265         if settings.get("NOCOLOR") not in ("yes","true"):
15266                 portage.output.havecolor = 1
15267
15268         # The explicit --color < y | n > option overrides the NOCOLOR environment
15269         # variable and stdout auto-detection.
15270         if "--color" in myopts:
15271                 if "y" == myopts["--color"]:
15272                         portage.output.havecolor = 1
15273                         settings["NOCOLOR"] = "false"
15274                 else:
15275                         portage.output.havecolor = 0
15276                         settings["NOCOLOR"] = "true"
15277                 settings.backup_changes("NOCOLOR")
15278         elif not sys.stdout.isatty() and settings.get("NOCOLOR") != "no":
15279                 portage.output.havecolor = 0
15280                 settings["NOCOLOR"] = "true"
15281                 settings.backup_changes("NOCOLOR")
15282
15283 def apply_priorities(settings):
15284         ionice(settings)
15285         nice(settings)
15286
15287 def nice(settings):
15288         try:
15289                 os.nice(int(settings.get("PORTAGE_NICENESS", "0")))
15290         except (OSError, ValueError), e:
15291                 out = portage.output.EOutput()
15292                 out.eerror("Failed to change nice value to '%s'" % \
15293                         settings["PORTAGE_NICENESS"])
15294                 out.eerror("%s\n" % str(e))
15295
15296 def ionice(settings):
15297
15298         ionice_cmd = settings.get("PORTAGE_IONICE_COMMAND")
15299         if ionice_cmd:
15300                 ionice_cmd = shlex.split(ionice_cmd)
15301         if not ionice_cmd:
15302                 return
15303
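              # Substitute ${PID} in PORTAGE_IONICE_COMMAND with the pid of the
              # current emerge process before spawning the command.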
15304         from portage.util import varexpand
15305         variables = {"PID" : str(os.getpid())}
15306         cmd = [varexpand(x, mydict=variables) for x in ionice_cmd]
15307
15308         try:
15309                 rval = portage.process.spawn(cmd, env=os.environ)
15310         except portage.exception.CommandNotFound:
15311                         # The ionice command could not be found, so assume it is
15312                         # unsupported and return silently.
15313                 return
15314
15315         if rval != os.EX_OK:
15316                 out = portage.output.EOutput()
15317                 out.eerror("PORTAGE_IONICE_COMMAND returned %d" % (rval,))
15318                 out.eerror("See the make.conf(5) man page for PORTAGE_IONICE_COMMAND usage instructions.")
15319
15320 def display_missing_pkg_set(root_config, set_name):
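        """Print an error explaining that set_name cannot be satisfied and
        list the sets that do exist for this root."""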
15321
15322         msg = []
15323         msg.append(("emerge: There are no sets to satisfy '%s'. " + \
15324                 "The following sets exist:") % \
15325                 colorize("INFORM", set_name))
15326         msg.append("")
15327
15328         for s in sorted(root_config.sets):
15329                 msg.append("    %s" % s)
15330         msg.append("")
15331
15332         writemsg_level("".join("%s\n" % l for l in msg),
15333                 level=logging.ERROR, noiselevel=-1)
15334
15335 def expand_set_arguments(myfiles, myaction, root_config):
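        """Expand @set arguments in myfiles: bare "world"/"system" names get
        the set prefix, {key=value,...} option blocks are applied to the set
        configuration, and simple set expressions (intersection "/@",
        difference "-@", union "+@") are evaluated.  Returns a
        (newargs, retval) tuple; newargs is None when a required set is
        missing or invalid."""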
15336         retval = os.EX_OK
15337         setconfig = root_config.setconfig
15338
15339         sets = setconfig.getSets()
15340
15341         # In order to know exactly which atoms/sets should be added to the
15342         # world file, the depgraph performs set expansion later. It will get
15343         # confused about where the atoms came from if it's not allowed to
15344         # expand them itself.
15345         do_not_expand = (None, )
15346         newargs = []
15347         for a in myfiles:
15348                 if a in ("system", "world"):
15349                         newargs.append(SETPREFIX+a)
15350                 else:
15351                         newargs.append(a)
15352         myfiles = newargs
15353         del newargs
15354         newargs = []
15355
15356         # separators for set arguments
15357         ARG_START = "{"
15358         ARG_END = "}"
15359
15360         # WARNING: all operators must be of equal length
15361         IS_OPERATOR = "/@"
15362         DIFF_OPERATOR = "-@"
15363         UNION_OPERATOR = "+@"
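        # Illustrative (hypothetical) examples, assuming sets named "world"
        # and "security" are defined:
        #   @world-@security   atoms in world but not in security (difference)
        #   @world+@security   atoms in either set (union)
        #   @world/@security   atoms in both sets (intersection)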
15364         
15365         for i in range(0, len(myfiles)):
15366                 if myfiles[i].startswith(SETPREFIX):
15367                         start = 0
15368                         end = 0
15369                         x = myfiles[i][len(SETPREFIX):]
15370                         newset = ""
15371                         while x:
15372                                 start = x.find(ARG_START)
15373                                 end = x.find(ARG_END)
15374                                 if start > 0 and start < end:
15375                                         namepart = x[:start]
15376                                         argpart = x[start+1:end]
15377                                 
15378                                         # TODO: implement proper quoting
15379                                         args = argpart.split(",")
15380                                         options = {}
15381                                         for a in args:
15382                                                 if "=" in a:
15383                                                         k, v  = a.split("=", 1)
15384                                                         options[k] = v
15385                                                 else:
15386                                                         options[a] = "True"
15387                                         setconfig.update(namepart, options)
15388                                         newset += (x[:start-len(namepart)]+namepart)
15389                                         x = x[end+len(ARG_END):]
15390                                 else:
15391                                         newset += x
15392                                         x = ""
15393                         myfiles[i] = SETPREFIX+newset
15394                                 
15395         sets = setconfig.getSets()
15396
15397         # Display errors that occurred while loading the SetConfig instance.
15398         for e in setconfig.errors:
15399                 print colorize("BAD", "Error during set creation: %s" % e)
15400         
15401         # emerge relies on the existence of sets named "world" and "system"
15402         required_sets = ("world", "system")
15403         missing_sets = []
15404
15405         for s in required_sets:
15406                 if s not in sets:
15407                         missing_sets.append(s)
15408         if missing_sets:
15409                 if len(missing_sets) > 2:
15410                         missing_sets_str = ", ".join('"%s"' % s for s in missing_sets[:-1])
15411                         missing_sets_str += ', and "%s"' % missing_sets[-1]
15412                 elif len(missing_sets) == 2:
15413                         missing_sets_str = '"%s" and "%s"' % tuple(missing_sets)
15414                 else:
15415                         missing_sets_str = '"%s"' % missing_sets[-1]
15416                 msg = ["emerge: incomplete set configuration, " + \
15417                         "missing set(s): %s" % missing_sets_str]
15418                 if sets:
15419                         msg.append("        sets defined: %s" % ", ".join(sets))
15420                 msg.append("        This usually means that '%s'" % \
15421                         (os.path.join(portage.const.GLOBAL_CONFIG_PATH, "sets.conf"),))
15422                 msg.append("        is missing or corrupt.")
15423                 for line in msg:
15424                         writemsg_level(line + "\n", level=logging.ERROR, noiselevel=-1)
15425                 return (None, 1)
15426         unmerge_actions = ("unmerge", "prune", "clean", "depclean")
15427
15428         for a in myfiles:
15429                 if a.startswith(SETPREFIX):
15430                         # Support simple set operations (intersection, difference and union)
15431                         # on the command line. Expressions are evaluated strictly left to right.
15432                         if IS_OPERATOR in a or DIFF_OPERATOR in a or UNION_OPERATOR in a:
15433                                 expression = a[len(SETPREFIX):]
15434                                 expr_sets = []
15435                                 expr_ops = []
15436                                 while IS_OPERATOR in expression or DIFF_OPERATOR in expression or UNION_OPERATOR in expression:
15437                                         is_pos = expression.rfind(IS_OPERATOR)
15438                                         diff_pos = expression.rfind(DIFF_OPERATOR)
15439                                         union_pos = expression.rfind(UNION_OPERATOR)
15440                                         op_pos = max(is_pos, diff_pos, union_pos)
15441                                         s1 = expression[:op_pos]
15442                                         s2 = expression[op_pos+len(IS_OPERATOR):]
15443                                         op = expression[op_pos:op_pos+len(IS_OPERATOR)]
15444                                         if not s2 in sets:
15445                                                 display_missing_pkg_set(root_config, s2)
15446                                                 return (None, 1)
15447                                         expr_sets.insert(0, s2)
15448                                         expr_ops.insert(0, op)
15449                                         expression = s1
15450                                 if not expression in sets:
15451                                         display_missing_pkg_set(root_config, expression)
15452                                         return (None, 1)
15453                                 expr_sets.insert(0, expression)
15454                                 result = set(setconfig.getSetAtoms(expression))
15455                                 for i in range(0, len(expr_ops)):
15456                                         s2 = setconfig.getSetAtoms(expr_sets[i+1])
15457                                         if expr_ops[i] == IS_OPERATOR:
15458                                                 result.intersection_update(s2)
15459                                         elif expr_ops[i] == DIFF_OPERATOR:
15460                                                 result.difference_update(s2)
15461                                         elif expr_ops[i] == UNION_OPERATOR:
15462                                                 result.update(s2)
15463                                         else:
15464                                                 raise NotImplementedError("unknown set operator %s" % expr_ops[i])
15465                                 newargs.extend(result)
15466                         else:                   
15467                                 s = a[len(SETPREFIX):]
15468                                 if s not in sets:
15469                                         display_missing_pkg_set(root_config, s)
15470                                         return (None, 1)
15471                                 setconfig.active.append(s)
15472                                 try:
15473                                         set_atoms = setconfig.getSetAtoms(s)
15474                                 except portage.exception.PackageSetNotFound, e:
15475                                         writemsg_level(("emerge: the given set '%s' " + \
15476                                                 "contains a non-existent set named '%s'.\n") % \
15477                                                 (s, e), level=logging.ERROR, noiselevel=-1)
15478                                         return (None, 1)
15479                                 if myaction in unmerge_actions and \
15480                                                 not sets[s].supportsOperation("unmerge"):
15481                                         sys.stderr.write("emerge: the given set '%s' does " % s + \
15482                                                 "not support unmerge operations\n")
15483                                         retval = 1
15484                                 elif not set_atoms:
15485                                         print "emerge: '%s' is an empty set" % s
15486                                 elif myaction not in do_not_expand:
15487                                         newargs.extend(set_atoms)
15488                                 else:
15489                                         newargs.append(SETPREFIX+s)
15490                                 for e in sets[s].errors:
15491                                         print e
15492                 else:
15493                         newargs.append(a)
15494         return (newargs, retval)
15495
15496 def repo_name_check(trees):
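        """Warn about repositories that lack a profiles/repo_name entry.
        Returns True if any repository is missing one."""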
15497         missing_repo_names = set()
15498         for root, root_trees in trees.iteritems():
15499                 if "porttree" in root_trees:
15500                         portdb = root_trees["porttree"].dbapi
15501                         missing_repo_names.update(portdb.porttrees)
15502                         repos = portdb.getRepositories()
15503                         for r in repos:
15504                                 missing_repo_names.discard(portdb.getRepositoryPath(r))
15505                         if portdb.porttree_root in missing_repo_names and \
15506                                 not os.path.exists(os.path.join(
15507                                 portdb.porttree_root, "profiles")):
15508                                 # This is normal if $PORTDIR happens to be empty,
15509                                 # so don't warn about it.
15510                                 missing_repo_names.remove(portdb.porttree_root)
15511
15512         if missing_repo_names:
15513                 msg = []
15514                 msg.append("WARNING: One or more repositories " + \
15515                         "have missing repo_name entries:")
15516                 msg.append("")
15517                 for p in missing_repo_names:
15518                         msg.append("\t%s/profiles/repo_name" % (p,))
15519                 msg.append("")
15520                 msg.extend(textwrap.wrap("NOTE: Each repo_name entry " + \
15521                         "should be a plain text file containing a unique " + \
15522                         "name for the repository on the first line.", 70))
15523                 writemsg_level("".join("%s\n" % l for l in msg),
15524                         level=logging.WARNING, noiselevel=-1)
15525
15526         return bool(missing_repo_names)
15527
15528 def config_protect_check(trees):
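        """Warn if CONFIG_PROTECT is empty for any configured root."""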
15529         for root, root_trees in trees.iteritems():
15530                 if not root_trees["root_config"].settings.get("CONFIG_PROTECT"):
15531                         msg = "!!! CONFIG_PROTECT is empty"
15532                         if root != "/":
15533                                 msg += " for '%s'" % root
15534                         writemsg_level(msg, level=logging.WARN, noiselevel=-1)
15535
15536 def ambiguous_package_name(arg, atoms, root_config, spinner, myopts):
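        """Tell the user that a short ebuild name matched several packages and
        list the fully-qualified alternatives (via the search class unless
        --quiet is in effect)."""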
15537
15538         if "--quiet" in myopts:
15539                 print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
15540                 print "!!! one of the following fully-qualified ebuild names instead:\n"
15541                 for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
15542                         print "    " + colorize("INFORM", cp)
15543                 return
15544
15545         s = search(root_config, spinner, "--searchdesc" in myopts,
15546                 "--quiet" not in myopts, "--usepkg" in myopts,
15547                 "--usepkgonly" in myopts)
15548         null_cp = portage.dep_getkey(insert_category_into_atom(
15549                 arg, "null"))
15550         cat, atom_pn = portage.catsplit(null_cp)
15551         s.searchkey = atom_pn
15552         for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
15553                 s.addCP(cp)
15554         s.output()
15555         print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
15556         print "!!! one of the above fully-qualified ebuild names instead.\n"
15557
15558 def profile_check(trees, myaction, myopts):
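        """Return os.EX_OK if every configured root has a valid profile or the
        requested action/options do not require one; otherwise print an error
        and return 1."""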
15559         if myaction in ("info", "sync"):
15560                 return os.EX_OK
15561         elif "--version" in myopts or "--help" in myopts:
15562                 return os.EX_OK
15563         for root, root_trees in trees.iteritems():
15564                 if root_trees["root_config"].settings.profiles:
15565                         continue
15566                 # generate some profile related warning messages
15567                 validate_ebuild_environment(trees)
15568                 msg = "If you have just changed your profile configuration, you " + \
15569                         "should revert back to the previous configuration. Due to " + \
15570                         "your current profile being invalid, allowed actions are " + \
15571                         "limited to --help, --info, --sync, and --version."
15572                 writemsg_level("".join("!!! %s\n" % l for l in textwrap.wrap(msg, 70)),
15573                         level=logging.ERROR, noiselevel=-1)
15574                 return 1
15575         return os.EX_OK
15576
15577 def emerge_main():
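        """Top-level entry point for emerge: parse options, load the emerge
        configuration and dispatch to the requested action.  Returns the exit
        status for the emerge process."""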
15578         global portage  # NFC why this is necessary now - genone
15579         portage._disable_legacy_globals()
15580         # Disable color until we're sure that it should be enabled (after
15581         # EMERGE_DEFAULT_OPTS has been parsed).
15582         portage.output.havecolor = 0
15583         # This first pass is just for options that need to be known as early as
15584         # possible, such as --config-root.  They will be parsed again later,
15585         # together with EMERGE_DEFAULT_OPTS (which may vary depending on the
15586         # value of --config-root).
15587         myaction, myopts, myfiles = parse_opts(sys.argv[1:], silent=True)
15588         if "--debug" in myopts:
15589                 os.environ["PORTAGE_DEBUG"] = "1"
15590         if "--config-root" in myopts:
15591                 os.environ["PORTAGE_CONFIGROOT"] = myopts["--config-root"]
15592         if "--root" in myopts:
15593                 os.environ["ROOT"] = myopts["--root"]
15594
15595         # Portage needs to ensure a sane umask for the files it creates.
15596         os.umask(022)
15597         settings, trees, mtimedb = load_emerge_config()
15598         portdb = trees[settings["ROOT"]]["porttree"].dbapi
15599         rval = profile_check(trees, myaction, myopts)
15600         if rval != os.EX_OK:
15601                 return rval
15602
15603         if portage._global_updates(trees, mtimedb["updates"]):
15604                 mtimedb.commit()
15605                 # Reload the whole config from scratch.
15606                 settings, trees, mtimedb = load_emerge_config(trees=trees)
15607                 portdb = trees[settings["ROOT"]]["porttree"].dbapi
15608
15609         xterm_titles = "notitles" not in settings.features
15610
15611         tmpcmdline = []
15612         if "--ignore-default-opts" not in myopts:
15613                 tmpcmdline.extend(settings["EMERGE_DEFAULT_OPTS"].split())
15614         tmpcmdline.extend(sys.argv[1:])
15615         myaction, myopts, myfiles = parse_opts(tmpcmdline)
15616
15617         if "--digest" in myopts:
15618                 os.environ["FEATURES"] = os.environ.get("FEATURES","") + " digest"
15619                 # Reload the whole config from scratch so that the portdbapi internal
15620                 # config is updated with new FEATURES.
15621                 settings, trees, mtimedb = load_emerge_config(trees=trees)
15622                 portdb = trees[settings["ROOT"]]["porttree"].dbapi
15623
15624         for myroot in trees:
15625                 mysettings =  trees[myroot]["vartree"].settings
15626                 mysettings.unlock()
15627                 adjust_config(myopts, mysettings)
15628                 if '--pretend' not in myopts and myaction in \
15629                         (None, 'clean', 'depclean', 'prune', 'unmerge'):
15630                         mysettings["PORTAGE_COUNTER_HASH"] = \
15631                                 trees[myroot]["vartree"].dbapi._counter_hash()
15632                         mysettings.backup_changes("PORTAGE_COUNTER_HASH")
15633                 mysettings.lock()
15634                 del myroot, mysettings
15635
15636         apply_priorities(settings)
15637
15638         spinner = stdout_spinner()
15639         if "candy" in settings.features:
15640                 spinner.update = spinner.update_scroll
15641
15642         if "--quiet" not in myopts:
15643                 portage.deprecated_profile_check(settings=settings)
15644                 repo_name_check(trees)
15645                 config_protect_check(trees)
15646
15647         for mytrees in trees.itervalues():
15648                 mydb = mytrees["porttree"].dbapi
15649                 # Freeze the portdbapi for performance (memoize all xmatch results).
15650                 mydb.freeze()
15651         del mytrees, mydb
15652
15653         if "moo" in myfiles:
15654                 print """
15655
15656   Larry loves Gentoo (""" + platform.system() + """)
15657
15658  _______________________
15659 < Have you mooed today? >
15660  -----------------------
15661         \   ^__^
15662          \  (oo)\_______
15663             (__)\       )\/\ 
15664                 ||----w |
15665                 ||     ||
15666
15667 """
15668
15669         for x in myfiles:
15670                 ext = os.path.splitext(x)[1]
15671                 if (ext == ".ebuild" or ext == ".tbz2") and os.path.exists(os.path.abspath(x)):
15672                         print colorize("BAD", "\n*** emerging by path is broken and may not always work!!!\n")
15673                         break
15674
15675         root_config = trees[settings["ROOT"]]["root_config"]
15676         if myaction == "list-sets":
15677                 sys.stdout.write("".join("%s\n" % s for s in sorted(root_config.sets)))
15678                 sys.stdout.flush()
15679                 return os.EX_OK
15680
15681         # only expand sets for actions taking package arguments
15682         oldargs = myfiles[:]
15683         if myaction in ("clean", "config", "depclean", "info", "prune", "unmerge", None):
15684                 myfiles, retval = expand_set_arguments(myfiles, myaction, root_config)
15685                 if retval != os.EX_OK:
15686                         return retval
15687
15688                 # Empty sets need special handling; otherwise emerge would react
15689                 # with the help message that is shown for empty argument lists.
15690                 if oldargs and not myfiles:
15691                         print "emerge: no targets left after set expansion"
15692                         return 0
15693
15694         if ("--tree" in myopts) and ("--columns" in myopts):
15695                 print "emerge: can't specify both of \"--tree\" and \"--columns\"."
15696                 return 1
15697
15698         if ("--quiet" in myopts):
15699                 spinner.update = spinner.update_quiet
15700                 portage.util.noiselimit = -1
15701
15702         # Always create packages if FEATURES=buildpkg
15703         # Imply --buildpkg if --buildpkgonly
15704         if ("buildpkg" in settings.features) or ("--buildpkgonly" in myopts):
15705                 if "--buildpkg" not in myopts:
15706                         myopts["--buildpkg"] = True
15707
15708         # Always try to fetch binary packages if FEATURES=getbinpkg
15709         if ("getbinpkg" in settings.features):
15710                 myopts["--getbinpkg"] = True
15711
15712         if "--buildpkgonly" in myopts:
15713                 # --buildpkgonly will not merge anything, so
15714                 # it cancels all binary package options.
15715                 for opt in ("--getbinpkg", "--getbinpkgonly",
15716                         "--usepkg", "--usepkgonly"):
15717                         myopts.pop(opt, None)
15718
15719         if "--fetch-all-uri" in myopts:
15720                 myopts["--fetchonly"] = True
15721
15722         if "--skipfirst" in myopts and "--resume" not in myopts:
15723                 myopts["--resume"] = True
15724
15725         if ("--getbinpkgonly" in myopts) and not ("--usepkgonly" in myopts):
15726                 myopts["--usepkgonly"] = True
15727
15728         if ("--getbinpkgonly" in myopts) and not ("--getbinpkg" in myopts):
15729                 myopts["--getbinpkg"] = True
15730
15731         if ("--getbinpkg" in myopts) and not ("--usepkg" in myopts):
15732                 myopts["--usepkg"] = True
15733
15734         # Also allow -K to apply --usepkg/-k
15735         if ("--usepkgonly" in myopts) and not ("--usepkg" in myopts):
15736                 myopts["--usepkg"] = True
15737
15738         # Allow -p to remove --ask
15739         if ("--pretend" in myopts) and ("--ask" in myopts):
15740                 print ">>> --pretend disables --ask... removing --ask from options."
15741                 del myopts["--ask"]
15742
15743         # forbid --ask when not in a terminal
15744         # note: this breaks `emerge --ask | tee logfile`, but that doesn't work anyway.
15745         if ("--ask" in myopts) and (not sys.stdin.isatty()):
15746                 portage.writemsg("!!! \"--ask\" should only be used in a terminal. Exiting.\n",
15747                         noiselevel=-1)
15748                 return 1
15749
15750         if settings.get("PORTAGE_DEBUG", "") == "1":
15751                 spinner.update = spinner.update_quiet
15752                 portage.debug=1
15753                 if "python-trace" in settings.features:
15754                         import portage.debug
15755                         portage.debug.set_trace(True)
15756
15757         if not ("--quiet" in myopts):
15758                 if not sys.stdout.isatty() or ("--nospinner" in myopts):
15759                         spinner.update = spinner.update_basic
15760
15761         if myaction == 'version':
15762                 print getportageversion(settings["PORTDIR"], settings["ROOT"],
15763                         settings.profile_path, settings["CHOST"],
15764                         trees[settings["ROOT"]]["vartree"].dbapi)
15765                 return 0
15766         elif "--help" in myopts:
15767                 _emerge.help.help(myaction, myopts, portage.output.havecolor)
15768                 return 0
15769
15770         if "--debug" in myopts:
15771                 print "myaction", myaction
15772                 print "myopts", myopts
15773
15774         if not myaction and not myfiles and "--resume" not in myopts:
15775                 _emerge.help.help(myaction, myopts, portage.output.havecolor)
15776                 return 1
15777
15778         pretend = "--pretend" in myopts
15779         fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
15780         buildpkgonly = "--buildpkgonly" in myopts
15781
15782         # Check whether the current user has the privileges required for the requested action.
15783         if portage.secpass < 2:
15784                 # We've already allowed "--version" and "--help" above.
15785                 if "--pretend" not in myopts and myaction not in ("search","info"):
15786                         need_superuser = not \
15787                                 (fetchonly or \
15788                                 (buildpkgonly and secpass >= 1) or \
15789                                 myaction in ("metadata", "regen") or \
15790                                 (myaction == "sync" and os.access(settings["PORTDIR"], os.W_OK)))
15791                         if portage.secpass < 1 or \
15792                                 need_superuser:
15793                                 if need_superuser:
15794                                         access_desc = "superuser"
15795                                 else:
15796                                         access_desc = "portage group"
15797                                 # Always show portage_group_warning() when only portage group
15798                                 # access is required but the user is not in the portage group.
15799                                 from portage.data import portage_group_warning
15800                                 if "--ask" in myopts:
15801                                         myopts["--pretend"] = True
15802                                         del myopts["--ask"]
15803                                         print ("%s access is required... " + \
15804                                                 "adding --pretend to options.\n") % access_desc
15805                                         if portage.secpass < 1 and not need_superuser:
15806                                                 portage_group_warning()
15807                                 else:
15808                                         sys.stderr.write(("emerge: %s access is " + \
15809                                                 "required.\n\n") % access_desc)
15810                                         if portage.secpass < 1 and not need_superuser:
15811                                                 portage_group_warning()
15812                                         return 1
15813
15814         disable_emergelog = False
15815         for x in ("--pretend", "--fetchonly", "--fetch-all-uri"):
15816                 if x in myopts:
15817                         disable_emergelog = True
15818                         break
15819         if myaction in ("search", "info"):
15820                 disable_emergelog = True
15821         if disable_emergelog:
15822                 """ Disable emergelog for everything except build or unmerge
15823                 operations.  This helps minimize parallel emerge.log entries that can
15824                 confuse log parsers.  We especially want it disabled during
15825                 parallel-fetch, which uses --resume --fetchonly."""
15826                 global emergelog
15827                 def emergelog(*pargs, **kargs):
15828                         pass
15829
15830         if not "--pretend" in myopts:
15831                 emergelog(xterm_titles, "Started emerge on: "+\
15832                         time.strftime("%b %d, %Y %H:%M:%S", time.localtime()))
15833                 myelogstr=""
15834                 if myopts:
15835                         myelogstr=" ".join(myopts)
15836                 if myaction:
15837                         myelogstr+=" "+myaction
15838                 if myfiles:
15839                         myelogstr += " " + " ".join(oldargs)
15840                 emergelog(xterm_titles, " *** emerge " + myelogstr)
15841         del oldargs
15842
15843         def emergeexitsig(signum, frame):
15844                 signal.signal(signal.SIGINT, signal.SIG_IGN)
15845                 signal.signal(signal.SIGTERM, signal.SIG_IGN)
15846                 portage.util.writemsg("\n\nExiting on signal %(signal)s\n" % {"signal":signum})
15847                 sys.exit(100+signum)
15848         signal.signal(signal.SIGINT, emergeexitsig)
15849         signal.signal(signal.SIGTERM, emergeexitsig)
15850
15851         def emergeexit():
15852                 """This gets our final log message in before we quit."""
15853                 if "--pretend" not in myopts:
15854                         emergelog(xterm_titles, " *** terminating.")
15855                 if "notitles" not in settings.features:
15856                         xtermTitleReset()
15857         portage.atexit_register(emergeexit)
15858
15859         if myaction in ("config", "metadata", "regen", "sync"):
15860                 if "--pretend" in myopts:
15861                         sys.stderr.write(("emerge: The '%s' action does " + \
15862                                 "not support '--pretend'.\n") % myaction)
15863                         return 1
15864
15865         if "sync" == myaction:
15866                 return action_sync(settings, trees, mtimedb, myopts, myaction)
15867         elif "metadata" == myaction:
15868                 action_metadata(settings, portdb, myopts)
15869         elif myaction=="regen":
15870                 validate_ebuild_environment(trees)
15871                 return action_regen(settings, portdb, myopts.get("--jobs"),
15872                         myopts.get("--load-average"))
15873         # CONFIG action
15874         elif "config"==myaction:
15875                 validate_ebuild_environment(trees)
15876                 action_config(settings, trees, myopts, myfiles)
15877
15878         # SEARCH action
15879         elif "search"==myaction:
15880                 validate_ebuild_environment(trees)
15881                 action_search(trees[settings["ROOT"]]["root_config"],
15882                         myopts, myfiles, spinner)
15883
15884         elif myaction in ('clean', 'depclean', 'prune', 'unmerge'):
15885                 validate_ebuild_environment(trees)
15886                 rval = action_uninstall(settings, trees, mtimedb["ldpath"],
15887                         myopts, myaction, myfiles, spinner)
15888                 if not (buildpkgonly or fetchonly or pretend):
15889                         post_emerge(root_config, myopts, mtimedb, rval)
15890                 return rval
15891
15892         elif myaction == 'info':
15893
15894                 # Ensure atoms are valid before calling unmerge().
15895                 vardb = trees[settings["ROOT"]]["vartree"].dbapi
15896                 valid_atoms = []
15897                 for x in myfiles:
15898                         if is_valid_package_atom(x):
15899                                 try:
15900                                         valid_atoms.append(
15901                                                 portage.dep_expand(x, mydb=vardb, settings=settings))
15902                                 except portage.exception.AmbiguousPackageName, e:
15903                                         msg = "The short ebuild name \"" + x + \
15904                                                 "\" is ambiguous.  Please specify " + \
15905                                                 "one of the following " + \
15906                                                 "fully-qualified ebuild names instead:"
15907                                         for line in textwrap.wrap(msg, 70):
15908                                                 writemsg_level("!!! %s\n" % (line,),
15909                                                         level=logging.ERROR, noiselevel=-1)
15910                                         for i in e[0]:
15911                                                 writemsg_level("    %s\n" % colorize("INFORM", i),
15912                                                         level=logging.ERROR, noiselevel=-1)
15913                                         writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
15914                                         return 1
15915                                 continue
15916                         msg = []
15917                         msg.append("'%s' is not a valid package atom." % (x,))
15918                         msg.append("Please check ebuild(5) for full details.")
15919                         writemsg_level("".join("!!! %s\n" % line for line in msg),
15920                                 level=logging.ERROR, noiselevel=-1)
15921                         return 1
15922
15923                 return action_info(settings, trees, myopts, valid_atoms)
15924
15925         # "update", "system", or just process files:
15926         else:
15927                 validate_ebuild_environment(trees)
15928
15929                 for x in myfiles:
15930                         if x.startswith(SETPREFIX) or \
15931                                 is_valid_package_atom(x):
15932                                 continue
15933                         if x[:1] == os.sep:
15934                                 continue
15935                         try:
15936                                 os.lstat(x)
15937                                 continue
15938                         except OSError:
15939                                 pass
15940                         msg = []
15941                         msg.append("'%s' is not a valid package atom." % (x,))
15942                         msg.append("Please check ebuild(5) for full details.")
15943                         writemsg_level("".join("!!! %s\n" % line for line in msg),
15944                                 level=logging.ERROR, noiselevel=-1)
15945                         return 1
15946
15947                 if "--pretend" not in myopts:
15948                         display_news_notification(root_config, myopts)
15949                 retval = action_build(settings, trees, mtimedb,
15950                         myopts, myaction, myfiles, spinner)
15951                 root_config = trees[settings["ROOT"]]["root_config"]
15952                 post_emerge(root_config, myopts, mtimedb, retval)
15953
15954                 return retval