Update code for removing noauto from FEATURES since config.features is a
[portage.git] / pym / _emerge / __init__.py
1 #!/usr/bin/python -O
2 # Copyright 1999-2006 Gentoo Foundation
3 # Distributed under the terms of the GNU General Public License v2
4 # $Id$
5
6 import array
7 import codecs
8 from collections import deque
9 import fcntl
10 import formatter
11 import logging
12 import pwd
13 import select
14 import shlex
15 import shutil
16 import signal
17 import sys
18 import textwrap
19 import urlparse
20 import weakref
21 import gc
22 import os, stat
23 import platform
24
25 try:
26         import portage
27 except ImportError:
28         from os import path as osp
29         sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
30         import portage
31
32 from portage import digraph
33 from portage.const import NEWS_LIB_PATH
34
35 import _emerge.help
36 import portage.xpak, commands, errno, re, socket, time
37 from portage.output import blue, bold, colorize, darkblue, darkgreen, darkred, green, \
38         nc_len, red, teal, turquoise, xtermTitle, \
39         xtermTitleReset, yellow
40 from portage.output import create_color_func
41 good = create_color_func("GOOD")
42 bad = create_color_func("BAD")
43 # white looks bad on terminals with white background
44 from portage.output import bold as white
45
46 import portage.elog
47 import portage.dep
48 portage.dep._dep_check_strict = True
49 import portage.util
50 import portage.locks
51 import portage.exception
52 from portage.data import secpass
53 from portage.elog.messages import eerror
54 from portage.util import normalize_path as normpath
55 from portage.util import cmp_sort_key, writemsg, writemsg_level
56 from portage.sets import load_default_config, SETPREFIX
57 from portage.sets.base import InternalPackageSet
58
59 from itertools import chain, izip
60
61 try:
62         import cPickle as pickle
63 except ImportError:
64         import pickle
65
66 try:
67         from cStringIO import StringIO
68 except ImportError:
69         from StringIO import StringIO
70
71 class stdout_spinner(object):
72         scroll_msgs = [
73                 "Gentoo Rocks ("+platform.system()+")",
74                 "Thank you for using Gentoo. :)",
75                 "Are you actually trying to read this?",
76                 "How many times have you stared at this?",
77                 "We are generating the cache right now",
78                 "You are paying too much attention.",
79                 "A theory is better than its explanation.",
80                 "Phasers locked on target, Captain.",
81                 "Thrashing is just virtual crashing.",
82                 "To be is to program.",
83                 "Real Users hate Real Programmers.",
84                 "When all else fails, read the instructions.",
85                 "Functionality breeds Contempt.",
86                 "The future lies ahead.",
87                 "3.1415926535897932384626433832795028841971694",
88                 "Sometimes insanity is the only alternative.",
89                 "Inaccuracy saves a world of explanation.",
90         ]
91
92         twirl_sequence = "/-\\|/-\\|/-\\|/-\\|\\-/|\\-/|\\-/|\\-/|"
93
94         def __init__(self):
95                 self.spinpos = 0
96                 self.update = self.update_twirl
97                 self.scroll_sequence = self.scroll_msgs[
98                         int(time.time() * 100) % len(self.scroll_msgs)]
99                 self.last_update = 0
100                 self.min_display_latency = 0.05
101
102         def _return_early(self):
103                 """
104                 Flushing ouput to the tty too frequently wastes cpu time. Therefore,
105                 each update* method should return without doing any output when this
106                 method returns True.
107                 """
108                 cur_time = time.time()
109                 if cur_time - self.last_update < self.min_display_latency:
110                         return True
111                 self.last_update = cur_time
112                 return False
113
114         def update_basic(self):
115                 self.spinpos = (self.spinpos + 1) % 500
116                 if self._return_early():
117                         return
118                 if (self.spinpos % 100) == 0:
119                         if self.spinpos == 0:
120                                 sys.stdout.write(". ")
121                         else:
122                                 sys.stdout.write(".")
123                 sys.stdout.flush()
124
125         def update_scroll(self):
126                 if self._return_early():
127                         return
128                 if(self.spinpos >= len(self.scroll_sequence)):
129                         sys.stdout.write(darkgreen(" \b\b\b" + self.scroll_sequence[
130                                 len(self.scroll_sequence) - 1 - (self.spinpos % len(self.scroll_sequence))]))
131                 else:
132                         sys.stdout.write(green("\b " + self.scroll_sequence[self.spinpos]))
133                 sys.stdout.flush()
134                 self.spinpos = (self.spinpos + 1) % (2 * len(self.scroll_sequence))
135
136         def update_twirl(self):
137                 self.spinpos = (self.spinpos + 1) % len(self.twirl_sequence)
138                 if self._return_early():
139                         return
140                 sys.stdout.write("\b\b " + self.twirl_sequence[self.spinpos])
141                 sys.stdout.flush()
142
143         def update_quiet(self):
144                 return
145
146 def userquery(prompt, responses=None, colours=None):
147         """Displays a prompt and a set of responses, then waits for a response
148         which is checked against the responses and the first to match is
149         returned.  An empty response will match the first value in responses.  The
150         input buffer is *not* cleared prior to the prompt!
151
152         prompt: a String.
153         responses: a List of Strings.
154         colours: a List of Functions taking and returning a String, used to
155         process the responses for display. Typically these will be functions
156         like red() but could be e.g. lambda x: "DisplayString".
157         If responses is omitted, defaults to ["Yes", "No"], [green, red].
158         If only colours is omitted, defaults to [bold, ...].
159
160         Returns a member of the List responses. (If called without optional
161         arguments, returns "Yes" or "No".)
162         KeyboardInterrupt is converted to SystemExit to avoid tracebacks being
163         printed."""
164         if responses is None:
165                 responses = ["Yes", "No"]
166                 colours = [
167                         create_color_func("PROMPT_CHOICE_DEFAULT"),
168                         create_color_func("PROMPT_CHOICE_OTHER")
169                 ]
170         elif colours is None:
171                 colours=[bold]
172         colours=(colours*len(responses))[:len(responses)]
173         print bold(prompt),
174         try:
175                 while True:
176                         response=raw_input("["+"/".join([colours[i](responses[i]) for i in range(len(responses))])+"] ")
177                         for key in responses:
178                                 # An empty response will match the first value in responses.
179                                 if response.upper()==key[:len(response)].upper():
180                                         return key
181                         print "Sorry, response '%s' not understood." % response,
182         except (EOFError, KeyboardInterrupt):
183                 print "Interrupted."
184                 sys.exit(1)
185
186 actions = frozenset([
187 "clean", "config", "depclean",
188 "info", "list-sets", "metadata",
189 "prune", "regen",  "search",
190 "sync",  "unmerge", "version",
191 ])
192 options=[
193 "--ask",          "--alphabetical",
194 "--buildpkg",     "--buildpkgonly",
195 "--changelog",    "--columns",
196 "--complete-graph",
197 "--debug",        "--deep",
198 "--digest",
199 "--emptytree",
200 "--fetchonly",    "--fetch-all-uri",
201 "--getbinpkg",    "--getbinpkgonly",
202 "--help",         "--ignore-default-opts",
203 "--keep-going",
204 "--noconfmem",
205 "--newuse",       "--nocolor",
206 "--nodeps",       "--noreplace",
207 "--nospinner",    "--oneshot",
208 "--onlydeps",     "--pretend",
209 "--quiet",        "--resume",
210 "--searchdesc",   "--selective",
211 "--skipfirst",
212 "--tree",
213 "--update",
214 "--usepkg",       "--usepkgonly",
215 "--verbose",
216 ]
217
218 shortmapping={
219 "1":"--oneshot",
220 "a":"--ask",
221 "b":"--buildpkg",  "B":"--buildpkgonly",
222 "c":"--clean",     "C":"--unmerge",
223 "d":"--debug",     "D":"--deep",
224 "e":"--emptytree",
225 "f":"--fetchonly", "F":"--fetch-all-uri",
226 "g":"--getbinpkg", "G":"--getbinpkgonly",
227 "h":"--help",
228 "k":"--usepkg",    "K":"--usepkgonly",
229 "l":"--changelog",
230 "n":"--noreplace", "N":"--newuse",
231 "o":"--onlydeps",  "O":"--nodeps",
232 "p":"--pretend",   "P":"--prune",
233 "q":"--quiet",
234 "s":"--search",    "S":"--searchdesc",
235 "t":"--tree",
236 "u":"--update",
237 "v":"--verbose",   "V":"--version"
238 }
239
240 def emergelog(xterm_titles, mystr, short_msg=None):
241         if xterm_titles and short_msg:
242                 if "HOSTNAME" in os.environ:
243                         short_msg = os.environ["HOSTNAME"]+": "+short_msg
244                 xtermTitle(short_msg)
245         try:
246                 file_path = "/var/log/emerge.log"
247                 mylogfile = open(file_path, "a")
248                 portage.util.apply_secpass_permissions(file_path,
249                         uid=portage.portage_uid, gid=portage.portage_gid,
250                         mode=0660)
251                 mylock = None
252                 try:
253                         mylock = portage.locks.lockfile(mylogfile)
254                         # seek because we may have gotten held up by the lock.
255                         # if so, we may not be positioned at the end of the file.
256                         mylogfile.seek(0, 2)
257                         mylogfile.write(str(time.time())[:10]+": "+mystr+"\n")
258                         mylogfile.flush()
259                 finally:
260                         if mylock:
261                                 portage.locks.unlockfile(mylock)
262                         mylogfile.close()
263         except (IOError,OSError,portage.exception.PortageException), e:
264                 if secpass >= 1:
265                         print >> sys.stderr, "emergelog():",e
266
267 def countdown(secs=5, doing="Starting"):
268         if secs:
269                 print ">>> Waiting",secs,"seconds before starting..."
270                 print ">>> (Control-C to abort)...\n"+doing+" in: ",
271                 ticks=range(secs)
272                 ticks.reverse()
273                 for sec in ticks:
274                         sys.stdout.write(colorize("UNMERGE_WARN", str(sec+1)+" "))
275                         sys.stdout.flush()
276                         time.sleep(1)
277                 print
278
279 # formats a size given in bytes nicely
280 def format_size(mysize):
281         if isinstance(mysize, basestring):
282                 return mysize
283         if 0 != mysize % 1024:
284                 # Always round up to the next kB so that it doesn't show 0 kB when
285                 # some small file still needs to be fetched.
286                 mysize += 1024 - mysize % 1024
287         mystr=str(mysize/1024)
288         mycount=len(mystr)
289         while (mycount > 3):
290                 mycount-=3
291                 mystr=mystr[:mycount]+","+mystr[mycount:]
292         return mystr+" kB"
293
294
295 def getgccversion(chost):
296         """
297         rtype: C{str}
298         return:  the current in-use gcc version
299         """
300
301         gcc_ver_command = 'gcc -dumpversion'
302         gcc_ver_prefix = 'gcc-'
303
304         gcc_not_found_error = red(
305         "!!! No gcc found. You probably need to 'source /etc/profile'\n" +
306         "!!! to update the environment of this terminal and possibly\n" +
307         "!!! other terminals also.\n"
308         )
309
310         mystatus, myoutput = commands.getstatusoutput("gcc-config -c")
311         if mystatus == os.EX_OK and myoutput.startswith(chost + "-"):
312                 return myoutput.replace(chost + "-", gcc_ver_prefix, 1)
313
314         mystatus, myoutput = commands.getstatusoutput(
315                 chost + "-" + gcc_ver_command)
316         if mystatus == os.EX_OK:
317                 return gcc_ver_prefix + myoutput
318
319         mystatus, myoutput = commands.getstatusoutput(gcc_ver_command)
320         if mystatus == os.EX_OK:
321                 return gcc_ver_prefix + myoutput
322
323         portage.writemsg(gcc_not_found_error, noiselevel=-1)
324         return "[unavailable]"
325
326 def getportageversion(portdir, target_root, profile, chost, vardb):
327         profilever = "unavailable"
328         if profile:
329                 realpath = os.path.realpath(profile)
330                 basepath   = os.path.realpath(os.path.join(portdir, "profiles"))
331                 if realpath.startswith(basepath):
332                         profilever = realpath[1 + len(basepath):]
333                 else:
334                         try:
335                                 profilever = "!" + os.readlink(profile)
336                         except (OSError):
337                                 pass
338                 del realpath, basepath
339
340         libcver=[]
341         libclist  = vardb.match("virtual/libc")
342         libclist += vardb.match("virtual/glibc")
343         libclist  = portage.util.unique_array(libclist)
344         for x in libclist:
345                 xs=portage.catpkgsplit(x)
346                 if libcver:
347                         libcver+=","+"-".join(xs[1:])
348                 else:
349                         libcver="-".join(xs[1:])
350         if libcver==[]:
351                 libcver="unavailable"
352
353         gccver = getgccversion(chost)
354         unameout=platform.release()+" "+platform.machine()
355
356         return "Portage " + portage.VERSION +" ("+profilever+", "+gccver+", "+libcver+", "+unameout+")"
357
358 def create_depgraph_params(myopts, myaction):
359         #configure emerge engine parameters
360         #
361         # self:      include _this_ package regardless of if it is merged.
362         # selective: exclude the package if it is merged
363         # recurse:   go into the dependencies
364         # deep:      go into the dependencies of already merged packages
365         # empty:     pretend nothing is merged
366         # complete:  completely account for all known dependencies
367         # remove:    build graph for use in removing packages
368         myparams = set(["recurse"])
369
370         if myaction == "remove":
371                 myparams.add("remove")
372                 myparams.add("complete")
373                 return myparams
374
375         if "--update" in myopts or \
376                 "--newuse" in myopts or \
377                 "--reinstall" in myopts or \
378                 "--noreplace" in myopts:
379                 myparams.add("selective")
380         if "--emptytree" in myopts:
381                 myparams.add("empty")
382                 myparams.discard("selective")
383         if "--nodeps" in myopts:
384                 myparams.discard("recurse")
385         if "--deep" in myopts:
386                 myparams.add("deep")
387         if "--complete-graph" in myopts:
388                 myparams.add("complete")
389         return myparams
390
391 # search functionality
392 class search(object):
393
394         #
395         # class constants
396         #
397         VERSION_SHORT=1
398         VERSION_RELEASE=2
399
400         #
401         # public interface
402         #
403         def __init__(self, root_config, spinner, searchdesc,
404                 verbose, usepkg, usepkgonly):
405                 """Searches the available and installed packages for the supplied search key.
406                 The list of available and installed packages is created at object instantiation.
407                 This makes successive searches faster."""
408                 self.settings = root_config.settings
409                 self.vartree = root_config.trees["vartree"]
410                 self.spinner = spinner
411                 self.verbose = verbose
412                 self.searchdesc = searchdesc
413                 self.root_config = root_config
414                 self.setconfig = root_config.setconfig
415                 self.matches = {"pkg" : []}
416                 self.mlen = 0
417
418                 def fake_portdb():
419                         pass
420                 self.portdb = fake_portdb
421                 for attrib in ("aux_get", "cp_all",
422                         "xmatch", "findname", "getFetchMap"):
423                         setattr(fake_portdb, attrib, getattr(self, "_"+attrib))
424
425                 self._dbs = []
426
427                 portdb = root_config.trees["porttree"].dbapi
428                 bindb = root_config.trees["bintree"].dbapi
429                 vardb = root_config.trees["vartree"].dbapi
430
431                 if not usepkgonly and portdb._have_root_eclass_dir:
432                         self._dbs.append(portdb)
433
434                 if (usepkg or usepkgonly) and bindb.cp_all():
435                         self._dbs.append(bindb)
436
437                 self._dbs.append(vardb)
438                 self._portdb = portdb
439
440         def _cp_all(self):
441                 cp_all = set()
442                 for db in self._dbs:
443                         cp_all.update(db.cp_all())
444                 return list(sorted(cp_all))
445
446         def _aux_get(self, *args, **kwargs):
447                 for db in self._dbs:
448                         try:
449                                 return db.aux_get(*args, **kwargs)
450                         except KeyError:
451                                 pass
452                 raise
453
454         def _findname(self, *args, **kwargs):
455                 for db in self._dbs:
456                         if db is not self._portdb:
457                                 # We don't want findname to return anything
458                                 # unless it's an ebuild in a portage tree.
459                                 # Otherwise, it's already built and we don't
460                                 # care about it.
461                                 continue
462                         func = getattr(db, "findname", None)
463                         if func:
464                                 value = func(*args, **kwargs)
465                                 if value:
466                                         return value
467                 return None
468
469         def _getFetchMap(self, *args, **kwargs):
470                 for db in self._dbs:
471                         func = getattr(db, "getFetchMap", None)
472                         if func:
473                                 value = func(*args, **kwargs)
474                                 if value:
475                                         return value
476                 return {}
477
478         def _visible(self, db, cpv, metadata):
479                 installed = db is self.vartree.dbapi
480                 built = installed or db is not self._portdb
481                 pkg_type = "ebuild"
482                 if installed:
483                         pkg_type = "installed"
484                 elif built:
485                         pkg_type = "binary"
486                 return visible(self.settings,
487                         Package(type_name=pkg_type, root_config=self.root_config,
488                         cpv=cpv, built=built, installed=installed, metadata=metadata))
489
490         def _xmatch(self, level, atom):
491                 """
492                 This method does not expand old-style virtuals because it
493                 is restricted to returning matches for a single ${CATEGORY}/${PN}
494                 and old-style virual matches unreliable for that when querying
495                 multiple package databases. If necessary, old-style virtuals
496                 can be performed on atoms prior to calling this method.
497                 """
498                 cp = portage.dep_getkey(atom)
499                 if level == "match-all":
500                         matches = set()
501                         for db in self._dbs:
502                                 if hasattr(db, "xmatch"):
503                                         matches.update(db.xmatch(level, atom))
504                                 else:
505                                         matches.update(db.match(atom))
506                         result = list(x for x in matches if portage.cpv_getkey(x) == cp)
507                         db._cpv_sort_ascending(result)
508                 elif level == "match-visible":
509                         matches = set()
510                         for db in self._dbs:
511                                 if hasattr(db, "xmatch"):
512                                         matches.update(db.xmatch(level, atom))
513                                 else:
514                                         db_keys = list(db._aux_cache_keys)
515                                         for cpv in db.match(atom):
516                                                 metadata = izip(db_keys,
517                                                         db.aux_get(cpv, db_keys))
518                                                 if not self._visible(db, cpv, metadata):
519                                                         continue
520                                                 matches.add(cpv)
521                         result = list(x for x in matches if portage.cpv_getkey(x) == cp)
522                         db._cpv_sort_ascending(result)
523                 elif level == "bestmatch-visible":
524                         result = None
525                         for db in self._dbs:
526                                 if hasattr(db, "xmatch"):
527                                         cpv = db.xmatch("bestmatch-visible", atom)
528                                         if not cpv or portage.cpv_getkey(cpv) != cp:
529                                                 continue
530                                         if not result or cpv == portage.best([cpv, result]):
531                                                 result = cpv
532                                 else:
533                                         db_keys = Package.metadata_keys
534                                         # break out of this loop with highest visible
535                                         # match, checked in descending order
536                                         for cpv in reversed(db.match(atom)):
537                                                 if portage.cpv_getkey(cpv) != cp:
538                                                         continue
539                                                 metadata = izip(db_keys,
540                                                         db.aux_get(cpv, db_keys))
541                                                 if not self._visible(db, cpv, metadata):
542                                                         continue
543                                                 if not result or cpv == portage.best([cpv, result]):
544                                                         result = cpv
545                                                 break
546                 else:
547                         raise NotImplementedError(level)
548                 return result
549
550         def execute(self,searchkey):
551                 """Performs the search for the supplied search key"""
552                 match_category = 0
553                 self.searchkey=searchkey
554                 self.packagematches = []
555                 if self.searchdesc:
556                         self.searchdesc=1
557                         self.matches = {"pkg":[], "desc":[], "set":[]}
558                 else:
559                         self.searchdesc=0
560                         self.matches = {"pkg":[], "set":[]}
561                 print "Searching...   ",
562
563                 regexsearch = False
564                 if self.searchkey.startswith('%'):
565                         regexsearch = True
566                         self.searchkey = self.searchkey[1:]
567                 if self.searchkey.startswith('@'):
568                         match_category = 1
569                         self.searchkey = self.searchkey[1:]
570                 if regexsearch:
571                         self.searchre=re.compile(self.searchkey,re.I)
572                 else:
573                         self.searchre=re.compile(re.escape(self.searchkey), re.I)
574                 for package in self.portdb.cp_all():
575                         self.spinner.update()
576
577                         if match_category:
578                                 match_string  = package[:]
579                         else:
580                                 match_string  = package.split("/")[-1]
581
582                         masked=0
583                         if self.searchre.search(match_string):
584                                 if not self.portdb.xmatch("match-visible", package):
585                                         masked=1
586                                 self.matches["pkg"].append([package,masked])
587                         elif self.searchdesc: # DESCRIPTION searching
588                                 full_package = self.portdb.xmatch("bestmatch-visible", package)
589                                 if not full_package:
590                                         #no match found; we don't want to query description
591                                         full_package = portage.best(
592                                                 self.portdb.xmatch("match-all", package))
593                                         if not full_package:
594                                                 continue
595                                         else:
596                                                 masked=1
597                                 try:
598                                         full_desc = self.portdb.aux_get(
599                                                 full_package, ["DESCRIPTION"])[0]
600                                 except KeyError:
601                                         print "emerge: search: aux_get() failed, skipping"
602                                         continue
603                                 if self.searchre.search(full_desc):
604                                         self.matches["desc"].append([full_package,masked])
605
606                 self.sdict = self.setconfig.getSets()
607                 for setname in self.sdict:
608                         self.spinner.update()
609                         if match_category:
610                                 match_string = setname
611                         else:
612                                 match_string = setname.split("/")[-1]
613                         
614                         if self.searchre.search(match_string):
615                                 self.matches["set"].append([setname, False])
616                         elif self.searchdesc:
617                                 if self.searchre.search(
618                                         self.sdict[setname].getMetadata("DESCRIPTION")):
619                                         self.matches["set"].append([setname, False])
620                         
621                 self.mlen=0
622                 for mtype in self.matches:
623                         self.matches[mtype].sort()
624                         self.mlen += len(self.matches[mtype])
625
626         def addCP(self, cp):
627                 if not self.portdb.xmatch("match-all", cp):
628                         return
629                 masked = 0
630                 if not self.portdb.xmatch("bestmatch-visible", cp):
631                         masked = 1
632                 self.matches["pkg"].append([cp, masked])
633                 self.mlen += 1
634
635         def output(self):
636                 """Outputs the results of the search."""
637                 print "\b\b  \n[ Results for search key : "+white(self.searchkey)+" ]"
638                 print "[ Applications found : "+white(str(self.mlen))+" ]"
639                 print " "
640                 vardb = self.vartree.dbapi
641                 for mtype in self.matches:
642                         for match,masked in self.matches[mtype]:
643                                 full_package = None
644                                 if mtype == "pkg":
645                                         catpack = match
646                                         full_package = self.portdb.xmatch(
647                                                 "bestmatch-visible", match)
648                                         if not full_package:
649                                                 #no match found; we don't want to query description
650                                                 masked=1
651                                                 full_package = portage.best(
652                                                         self.portdb.xmatch("match-all",match))
653                                 elif mtype == "desc":
654                                         full_package = match
655                                         match        = portage.cpv_getkey(match)
656                                 elif mtype == "set":
657                                         print green("*")+"  "+white(match)
658                                         print "     ", darkgreen("Description:")+"  ", self.sdict[match].getMetadata("DESCRIPTION")
659                                         print
660                                 if full_package:
661                                         try:
662                                                 desc, homepage, license = self.portdb.aux_get(
663                                                         full_package, ["DESCRIPTION","HOMEPAGE","LICENSE"])
664                                         except KeyError:
665                                                 print "emerge: search: aux_get() failed, skipping"
666                                                 continue
667                                         if masked:
668                                                 print green("*")+"  "+white(match)+" "+red("[ Masked ]")
669                                         else:
670                                                 print green("*")+"  "+white(match)
671                                         myversion = self.getVersion(full_package, search.VERSION_RELEASE)
672
673                                         mysum = [0,0]
674                                         file_size_str = None
675                                         mycat = match.split("/")[0]
676                                         mypkg = match.split("/")[1]
677                                         mycpv = match + "-" + myversion
678                                         myebuild = self.portdb.findname(mycpv)
679                                         if myebuild:
680                                                 pkgdir = os.path.dirname(myebuild)
681                                                 from portage import manifest
682                                                 mf = manifest.Manifest(
683                                                         pkgdir, self.settings["DISTDIR"])
684                                                 try:
685                                                         uri_map = self.portdb.getFetchMap(mycpv)
686                                                 except portage.exception.InvalidDependString, e:
687                                                         file_size_str = "Unknown (%s)" % (e,)
688                                                         del e
689                                                 else:
690                                                         try:
691                                                                 mysum[0] = mf.getDistfilesSize(uri_map)
692                                                         except KeyError, e:
693                                                                 file_size_str = "Unknown (missing " + \
694                                                                         "digest for %s)" % (e,)
695                                                                 del e
696
697                                         available = False
698                                         for db in self._dbs:
699                                                 if db is not vardb and \
700                                                         db.cpv_exists(mycpv):
701                                                         available = True
702                                                         if not myebuild and hasattr(db, "bintree"):
703                                                                 myebuild = db.bintree.getname(mycpv)
704                                                                 try:
705                                                                         mysum[0] = os.stat(myebuild).st_size
706                                                                 except OSError:
707                                                                         myebuild = None
708                                                         break
709
710                                         if myebuild and file_size_str is None:
711                                                 mystr = str(mysum[0] / 1024)
712                                                 mycount = len(mystr)
713                                                 while (mycount > 3):
714                                                         mycount -= 3
715                                                         mystr = mystr[:mycount] + "," + mystr[mycount:]
716                                                 file_size_str = mystr + " kB"
717
718                                         if self.verbose:
719                                                 if available:
720                                                         print "     ", darkgreen("Latest version available:"),myversion
721                                                 print "     ", self.getInstallationStatus(mycat+'/'+mypkg)
722                                                 if myebuild:
723                                                         print "      %s %s" % \
724                                                                 (darkgreen("Size of files:"), file_size_str)
725                                                 print "     ", darkgreen("Homepage:")+"     ",homepage
726                                                 print "     ", darkgreen("Description:")+"  ",desc
727                                                 print "     ", darkgreen("License:")+"      ",license
728                                                 print
729         #
730         # private interface
731         #
732         def getInstallationStatus(self,package):
733                 installed_package = self.vartree.dep_bestmatch(package)
734                 result = ""
735                 version = self.getVersion(installed_package,search.VERSION_RELEASE)
736                 if len(version) > 0:
737                         result = darkgreen("Latest version installed:")+" "+version
738                 else:
739                         result = darkgreen("Latest version installed:")+" [ Not Installed ]"
740                 return result
741
742         def getVersion(self,full_package,detail):
743                 if len(full_package) > 1:
744                         package_parts = portage.catpkgsplit(full_package)
745                         if detail == search.VERSION_RELEASE and package_parts[3] != 'r0':
746                                 result = package_parts[2]+ "-" + package_parts[3]
747                         else:
748                                 result = package_parts[2]
749                 else:
750                         result = ""
751                 return result
752
753 class RootConfig(object):
754         """This is used internally by depgraph to track information about a
755         particular $ROOT."""
756
757         pkg_tree_map = {
758                 "ebuild"    : "porttree",
759                 "binary"    : "bintree",
760                 "installed" : "vartree"
761         }
762
763         tree_pkg_map = {}
764         for k, v in pkg_tree_map.iteritems():
765                 tree_pkg_map[v] = k
766
767         def __init__(self, settings, trees, setconfig):
768                 self.trees = trees
769                 self.settings = settings
770                 self.iuse_implicit = tuple(sorted(settings._get_implicit_iuse()))
771                 self.root = self.settings["ROOT"]
772                 self.setconfig = setconfig
773                 self.sets = self.setconfig.getSets()
774                 self.visible_pkgs = PackageVirtualDbapi(self.settings)
775
776 def create_world_atom(pkg, args_set, root_config):
777         """Create a new atom for the world file if one does not exist.  If the
778         argument atom is precise enough to identify a specific slot then a slot
779         atom will be returned. Atoms that are in the system set may also be stored
780         in world since system atoms can only match one slot while world atoms can
781         be greedy with respect to slots.  Unslotted system packages will not be
782         stored in world."""
783
784         arg_atom = args_set.findAtomForPackage(pkg)
785         if not arg_atom:
786                 return None
787         cp = portage.dep_getkey(arg_atom)
788         new_world_atom = cp
789         sets = root_config.sets
790         portdb = root_config.trees["porttree"].dbapi
791         vardb = root_config.trees["vartree"].dbapi
792         available_slots = set(portdb.aux_get(cpv, ["SLOT"])[0] \
793                 for cpv in portdb.match(cp))
794         slotted = len(available_slots) > 1 or \
795                 (len(available_slots) == 1 and "0" not in available_slots)
796         if not slotted:
797                 # check the vdb in case this is multislot
798                 available_slots = set(vardb.aux_get(cpv, ["SLOT"])[0] \
799                         for cpv in vardb.match(cp))
800                 slotted = len(available_slots) > 1 or \
801                         (len(available_slots) == 1 and "0" not in available_slots)
802         if slotted and arg_atom != cp:
803                 # If the user gave a specific atom, store it as a
804                 # slot atom in the world file.
805                 slot_atom = pkg.slot_atom
806
807                 # For USE=multislot, there are a couple of cases to
808                 # handle here:
809                 #
810                 # 1) SLOT="0", but the real SLOT spontaneously changed to some
811                 #    unknown value, so just record an unslotted atom.
812                 #
813                 # 2) SLOT comes from an installed package and there is no
814                 #    matching SLOT in the portage tree.
815                 #
816                 # Make sure that the slot atom is available in either the
817                 # portdb or the vardb, since otherwise the user certainly
818                 # doesn't want the SLOT atom recorded in the world file
819                 # (case 1 above).  If it's only available in the vardb,
820                 # the user may be trying to prevent a USE=multislot
821                 # package from being removed by --depclean (case 2 above).
822
823                 mydb = portdb
824                 if not portdb.match(slot_atom):
825                         # SLOT seems to come from an installed multislot package
826                         mydb = vardb
827                 # If there is no installed package matching the SLOT atom,
828                 # it probably changed SLOT spontaneously due to USE=multislot,
829                 # so just record an unslotted atom.
830                 if vardb.match(slot_atom):
831                         # Now verify that the argument is precise
832                         # enough to identify a specific slot.
833                         matches = mydb.match(arg_atom)
834                         matched_slots = set()
835                         for cpv in matches:
836                                 matched_slots.add(mydb.aux_get(cpv, ["SLOT"])[0])
837                         if len(matched_slots) == 1:
838                                 new_world_atom = slot_atom
839
840         if new_world_atom == sets["world"].findAtomForPackage(pkg):
841                 # Both atoms would be identical, so there's nothing to add.
842                 return None
843         if not slotted:
844                 # Unlike world atoms, system atoms are not greedy for slots, so they
845                 # can't be safely excluded from world if they are slotted.
846                 system_atom = sets["system"].findAtomForPackage(pkg)
847                 if system_atom:
848                         if not portage.dep_getkey(system_atom).startswith("virtual/"):
849                                 return None
850                         # System virtuals aren't safe to exclude from world since they can
851                         # match multiple old-style virtuals but only one of them will be
852                         # pulled in by update or depclean.
853                         providers = portdb.mysettings.getvirtuals().get(
854                                 portage.dep_getkey(system_atom))
855                         if providers and len(providers) == 1 and providers[0] == cp:
856                                 return None
857         return new_world_atom
858
859 def filter_iuse_defaults(iuse):
860         for flag in iuse:
861                 if flag.startswith("+") or flag.startswith("-"):
862                         yield flag[1:]
863                 else:
864                         yield flag
865
866 class SlotObject(object):
867         __slots__ = ("__weakref__",)
868
869         def __init__(self, **kwargs):
870                 classes = [self.__class__]
871                 while classes:
872                         c = classes.pop()
873                         if c is SlotObject:
874                                 continue
875                         classes.extend(c.__bases__)
876                         slots = getattr(c, "__slots__", None)
877                         if not slots:
878                                 continue
879                         for myattr in slots:
880                                 myvalue = kwargs.get(myattr, None)
881                                 setattr(self, myattr, myvalue)
882
883         def copy(self):
884                 """
885                 Create a new instance and copy all attributes
886                 defined from __slots__ (including those from
887                 inherited classes).
888                 """
889                 obj = self.__class__()
890
891                 classes = [self.__class__]
892                 while classes:
893                         c = classes.pop()
894                         if c is SlotObject:
895                                 continue
896                         classes.extend(c.__bases__)
897                         slots = getattr(c, "__slots__", None)
898                         if not slots:
899                                 continue
900                         for myattr in slots:
901                                 setattr(obj, myattr, getattr(self, myattr))
902
903                 return obj
904
905 class AbstractDepPriority(SlotObject):
906         __slots__ = ("buildtime", "runtime", "runtime_post")
907
908         def __lt__(self, other):
909                 return self.__int__() < other
910
911         def __le__(self, other):
912                 return self.__int__() <= other
913
914         def __eq__(self, other):
915                 return self.__int__() == other
916
917         def __ne__(self, other):
918                 return self.__int__() != other
919
920         def __gt__(self, other):
921                 return self.__int__() > other
922
923         def __ge__(self, other):
924                 return self.__int__() >= other
925
926         def copy(self):
927                 import copy
928                 return copy.copy(self)
929
930 class DepPriority(AbstractDepPriority):
931
932         __slots__ = ("satisfied", "optional", "rebuild")
933
934         def __int__(self):
935                 return 0
936
937         def __str__(self):
938                 if self.optional:
939                         return "optional"
940                 if self.buildtime:
941                         return "buildtime"
942                 if self.runtime:
943                         return "runtime"
944                 if self.runtime_post:
945                         return "runtime_post"
946                 return "soft"
947
948 class BlockerDepPriority(DepPriority):
949         __slots__ = ()
950         def __int__(self):
951                 return 0
952
953         def __str__(self):
954                 return 'blocker'
955
956 BlockerDepPriority.instance = BlockerDepPriority()
957
958 class UnmergeDepPriority(AbstractDepPriority):
959         __slots__ = ("optional", "satisfied",)
960         """
961         Combination of properties           Priority  Category
962
963         runtime                                0       HARD
964         runtime_post                          -1       HARD
965         buildtime                             -2       SOFT
966         (none of the above)                   -2       SOFT
967         """
968
969         MAX    =  0
970         SOFT   = -2
971         MIN    = -2
972
973         def __int__(self):
974                 if self.runtime:
975                         return 0
976                 if self.runtime_post:
977                         return -1
978                 if self.buildtime:
979                         return -2
980                 return -2
981
982         def __str__(self):
983                 myvalue = self.__int__()
984                 if myvalue > self.SOFT:
985                         return "hard"
986                 return "soft"
987
988 class DepPriorityNormalRange(object):
989         """
990         DepPriority properties              Index      Category
991
992         buildtime                                      HARD
993         runtime                                3       MEDIUM
994         runtime_post                           2       MEDIUM_SOFT
995         optional                               1       SOFT
996         (none of the above)                    0       NONE
997         """
998         MEDIUM      = 3
999         MEDIUM_SOFT = 2
1000         SOFT        = 1
1001         NONE        = 0
1002
1003         @classmethod
1004         def _ignore_optional(cls, priority):
1005                 if priority.__class__ is not DepPriority:
1006                         return False
1007                 return bool(priority.optional)
1008
1009         @classmethod
1010         def _ignore_runtime_post(cls, priority):
1011                 if priority.__class__ is not DepPriority:
1012                         return False
1013                 return bool(priority.optional or priority.runtime_post)
1014
1015         @classmethod
1016         def _ignore_runtime(cls, priority):
1017                 if priority.__class__ is not DepPriority:
1018                         return False
1019                 return not priority.buildtime
1020
1021         ignore_medium      = _ignore_runtime
1022         ignore_medium_soft = _ignore_runtime_post
1023         ignore_soft        = _ignore_optional
1024
1025 DepPriorityNormalRange.ignore_priority = (
1026         None,
1027         DepPriorityNormalRange._ignore_optional,
1028         DepPriorityNormalRange._ignore_runtime_post,
1029         DepPriorityNormalRange._ignore_runtime
1030 )
1031
1032 class DepPrioritySatisfiedRange(object):
1033         """
1034         DepPriority                         Index      Category
1035
1036         not satisfied and buildtime                    HARD
1037         not satisfied and runtime              7       MEDIUM
1038         not satisfied and runtime_post         6       MEDIUM_SOFT
1039         satisfied and buildtime and rebuild    5       SOFT
1040         satisfied and buildtime                4       SOFT
1041         satisfied and runtime                  3       SOFT
1042         satisfied and runtime_post             2       SOFT
1043         optional                               1       SOFT
1044         (none of the above)                    0       NONE
1045         """
1046         MEDIUM      = 7
1047         MEDIUM_SOFT = 6
1048         SOFT        = 5
1049         NONE        = 0
1050
1051         @classmethod
1052         def _ignore_optional(cls, priority):
1053                 if priority.__class__ is not DepPriority:
1054                         return False
1055                 return bool(priority.optional)
1056
1057         @classmethod
1058         def _ignore_satisfied_runtime_post(cls, priority):
1059                 if priority.__class__ is not DepPriority:
1060                         return False
1061                 if priority.optional:
1062                         return True
1063                 if not priority.satisfied:
1064                         return False
1065                 return bool(priority.runtime_post)
1066
1067         @classmethod
1068         def _ignore_satisfied_runtime(cls, priority):
1069                 if priority.__class__ is not DepPriority:
1070                         return False
1071                 if priority.optional:
1072                         return True
1073                 if not priority.satisfied:
1074                         return False
1075                 return not priority.buildtime
1076
1077         @classmethod
1078         def _ignore_satisfied_buildtime(cls, priority):
1079                 if priority.__class__ is not DepPriority:
1080                         return False
1081                 if priority.optional:
1082                         return True
1083                 if not priority.satisfied:
1084                         return False
1085                 if priority.buildtime:
1086                         return not priority.rebuild
1087                 return True
1088
1089         @classmethod
1090         def _ignore_satisfied_buildtime_rebuild(cls, priority):
1091                 if priority.__class__ is not DepPriority:
1092                         return False
1093                 if priority.optional:
1094                         return True
1095                 return bool(priority.satisfied)
1096
1097         @classmethod
1098         def _ignore_runtime_post(cls, priority):
1099                 if priority.__class__ is not DepPriority:
1100                         return False
1101                 return bool(priority.optional or \
1102                         priority.satisfied or \
1103                         priority.runtime_post)
1104
1105         @classmethod
1106         def _ignore_runtime(cls, priority):
1107                 if priority.__class__ is not DepPriority:
1108                         return False
1109                 return bool(priority.satisfied or \
1110                         not priority.buildtime)
1111
1112         ignore_medium      = _ignore_runtime
1113         ignore_medium_soft = _ignore_runtime_post
1114         ignore_soft        = _ignore_satisfied_buildtime_rebuild
1115
1116 DepPrioritySatisfiedRange.ignore_priority = (
1117         None,
1118         DepPrioritySatisfiedRange._ignore_optional,
1119         DepPrioritySatisfiedRange._ignore_satisfied_runtime_post,
1120         DepPrioritySatisfiedRange._ignore_satisfied_runtime,
1121         DepPrioritySatisfiedRange._ignore_satisfied_buildtime,
1122         DepPrioritySatisfiedRange._ignore_satisfied_buildtime_rebuild,
1123         DepPrioritySatisfiedRange._ignore_runtime_post,
1124         DepPrioritySatisfiedRange._ignore_runtime
1125 )
1126
1127 def _find_deep_system_runtime_deps(graph):
1128         deep_system_deps = set()
1129         node_stack = []
1130         for node in graph:
1131                 if not isinstance(node, Package) or \
1132                         node.operation == 'uninstall':
1133                         continue
1134                 if node.root_config.sets['system'].findAtomForPackage(node):
1135                         node_stack.append(node)
1136
1137         def ignore_priority(priority):
1138                 """
1139                 Ignore non-runtime priorities.
1140                 """
1141                 if isinstance(priority, DepPriority) and \
1142                         (priority.runtime or priority.runtime_post):
1143                         return False
1144                 return True
1145
1146         while node_stack:
1147                 node = node_stack.pop()
1148                 if node in deep_system_deps:
1149                         continue
1150                 deep_system_deps.add(node)
1151                 for child in graph.child_nodes(node, ignore_priority=ignore_priority):
1152                         if not isinstance(child, Package) or \
1153                                 child.operation == 'uninstall':
1154                                 continue
1155                         node_stack.append(child)
1156
1157         return deep_system_deps
1158
1159 class FakeVartree(portage.vartree):
1160         """This is implements an in-memory copy of a vartree instance that provides
1161         all the interfaces required for use by the depgraph.  The vardb is locked
1162         during the constructor call just long enough to read a copy of the
1163         installed package information.  This allows the depgraph to do it's
1164         dependency calculations without holding a lock on the vardb.  It also
1165         allows things like vardb global updates to be done in memory so that the
1166         user doesn't necessarily need write access to the vardb in cases where
1167         global updates are necessary (updates are performed when necessary if there
1168         is not a matching ebuild in the tree)."""
1169         def __init__(self, root_config, pkg_cache=None, acquire_lock=1):
1170                 self._root_config = root_config
1171                 if pkg_cache is None:
1172                         pkg_cache = {}
1173                 real_vartree = root_config.trees["vartree"]
1174                 portdb = root_config.trees["porttree"].dbapi
1175                 self.root = real_vartree.root
1176                 self.settings = real_vartree.settings
1177                 mykeys = list(real_vartree.dbapi._aux_cache_keys)
1178                 if "_mtime_" not in mykeys:
1179                         mykeys.append("_mtime_")
1180                 self._db_keys = mykeys
1181                 self._pkg_cache = pkg_cache
1182                 self.dbapi = PackageVirtualDbapi(real_vartree.settings)
1183                 vdb_path = os.path.join(self.root, portage.VDB_PATH)
1184                 try:
1185                         # At least the parent needs to exist for the lock file.
1186                         portage.util.ensure_dirs(vdb_path)
1187                 except portage.exception.PortageException:
1188                         pass
1189                 vdb_lock = None
1190                 try:
1191                         if acquire_lock and os.access(vdb_path, os.W_OK):
1192                                 vdb_lock = portage.locks.lockdir(vdb_path)
1193                         real_dbapi = real_vartree.dbapi
1194                         slot_counters = {}
1195                         for cpv in real_dbapi.cpv_all():
1196                                 cache_key = ("installed", self.root, cpv, "nomerge")
1197                                 pkg = self._pkg_cache.get(cache_key)
1198                                 if pkg is not None:
1199                                         metadata = pkg.metadata
1200                                 else:
1201                                         metadata = dict(izip(mykeys, real_dbapi.aux_get(cpv, mykeys)))
1202                                 myslot = metadata["SLOT"]
1203                                 mycp = portage.dep_getkey(cpv)
1204                                 myslot_atom = "%s:%s" % (mycp, myslot)
1205                                 try:
1206                                         mycounter = long(metadata["COUNTER"])
1207                                 except ValueError:
1208                                         mycounter = 0
1209                                         metadata["COUNTER"] = str(mycounter)
1210                                 other_counter = slot_counters.get(myslot_atom, None)
1211                                 if other_counter is not None:
1212                                         if other_counter > mycounter:
1213                                                 continue
1214                                 slot_counters[myslot_atom] = mycounter
1215                                 if pkg is None:
1216                                         pkg = Package(built=True, cpv=cpv,
1217                                                 installed=True, metadata=metadata,
1218                                                 root_config=root_config, type_name="installed")
1219                                 self._pkg_cache[pkg] = pkg
1220                                 self.dbapi.cpv_inject(pkg)
1221                         real_dbapi.flush_cache()
1222                 finally:
1223                         if vdb_lock:
1224                                 portage.locks.unlockdir(vdb_lock)
1225                 # Populate the old-style virtuals using the cached values.
1226                 if not self.settings.treeVirtuals:
1227                         self.settings.treeVirtuals = portage.util.map_dictlist_vals(
1228                                 portage.getCPFromCPV, self.get_all_provides())
1229
1230                 # Intialize variables needed for lazy cache pulls of the live ebuild
1231                 # metadata.  This ensures that the vardb lock is released ASAP, without
1232                 # being delayed in case cache generation is triggered.
1233                 self._aux_get = self.dbapi.aux_get
1234                 self.dbapi.aux_get = self._aux_get_wrapper
1235                 self._match = self.dbapi.match
1236                 self.dbapi.match = self._match_wrapper
1237                 self._aux_get_history = set()
1238                 self._portdb_keys = ["EAPI", "DEPEND", "RDEPEND", "PDEPEND"]
1239                 self._portdb = portdb
1240                 self._global_updates = None
1241
1242         def _match_wrapper(self, cpv, use_cache=1):
1243                 """
1244                 Make sure the metadata in Package instances gets updated for any
1245                 cpv that is returned from a match() call, since the metadata can
1246                 be accessed directly from the Package instance instead of via
1247                 aux_get().
1248                 """
1249                 matches = self._match(cpv, use_cache=use_cache)
1250                 for cpv in matches:
1251                         if cpv in self._aux_get_history:
1252                                 continue
1253                         self._aux_get_wrapper(cpv, [])
1254                 return matches
1255
1256         def _aux_get_wrapper(self, pkg, wants):
1257                 if pkg in self._aux_get_history:
1258                         return self._aux_get(pkg, wants)
1259                 self._aux_get_history.add(pkg)
1260                 try:
1261                         # Use the live ebuild metadata if possible.
1262                         live_metadata = dict(izip(self._portdb_keys,
1263                                 self._portdb.aux_get(pkg, self._portdb_keys)))
1264                         if not portage.eapi_is_supported(live_metadata["EAPI"]):
1265                                 raise KeyError(pkg)
1266                         self.dbapi.aux_update(pkg, live_metadata)
1267                 except (KeyError, portage.exception.PortageException):
1268                         if self._global_updates is None:
1269                                 self._global_updates = \
1270                                         grab_global_updates(self._portdb.porttree_root)
1271                         perform_global_updates(
1272                                 pkg, self.dbapi, self._global_updates)
1273                 return self._aux_get(pkg, wants)
1274
1275         def sync(self, acquire_lock=1):
1276                 """
1277                 Call this method to synchronize state with the real vardb
1278                 after one or more packages may have been installed or
1279                 uninstalled.
1280                 """
1281                 vdb_path = os.path.join(self.root, portage.VDB_PATH)
1282                 try:
1283                         # At least the parent needs to exist for the lock file.
1284                         portage.util.ensure_dirs(vdb_path)
1285                 except portage.exception.PortageException:
1286                         pass
1287                 vdb_lock = None
1288                 try:
1289                         if acquire_lock and os.access(vdb_path, os.W_OK):
1290                                 vdb_lock = portage.locks.lockdir(vdb_path)
1291                         self._sync()
1292                 finally:
1293                         if vdb_lock:
1294                                 portage.locks.unlockdir(vdb_lock)
1295
1296         def _sync(self):
1297
1298                 real_vardb = self._root_config.trees["vartree"].dbapi
1299                 current_cpv_set = frozenset(real_vardb.cpv_all())
1300                 pkg_vardb = self.dbapi
1301                 aux_get_history = self._aux_get_history
1302
1303                 # Remove any packages that have been uninstalled.
1304                 for pkg in list(pkg_vardb):
1305                         if pkg.cpv not in current_cpv_set:
1306                                 pkg_vardb.cpv_remove(pkg)
1307                                 aux_get_history.discard(pkg.cpv)
1308
1309                 # Validate counters and timestamps.
1310                 slot_counters = {}
1311                 root = self.root
1312                 validation_keys = ["COUNTER", "_mtime_"]
1313                 for cpv in current_cpv_set:
1314
1315                         pkg_hash_key = ("installed", root, cpv, "nomerge")
1316                         pkg = pkg_vardb.get(pkg_hash_key)
1317                         if pkg is not None:
1318                                 counter, mtime = real_vardb.aux_get(cpv, validation_keys)
1319                                 try:
1320                                         counter = long(counter)
1321                                 except ValueError:
1322                                         counter = 0
1323
1324                                 if counter != pkg.counter or \
1325                                         mtime != pkg.mtime:
1326                                         pkg_vardb.cpv_remove(pkg)
1327                                         aux_get_history.discard(pkg.cpv)
1328                                         pkg = None
1329
1330                         if pkg is None:
1331                                 pkg = self._pkg(cpv)
1332
1333                         other_counter = slot_counters.get(pkg.slot_atom)
1334                         if other_counter is not None:
1335                                 if other_counter > pkg.counter:
1336                                         continue
1337
1338                         slot_counters[pkg.slot_atom] = pkg.counter
1339                         pkg_vardb.cpv_inject(pkg)
1340
1341                 real_vardb.flush_cache()
1342
1343         def _pkg(self, cpv):
1344                 root_config = self._root_config
1345                 real_vardb = root_config.trees["vartree"].dbapi
1346                 pkg = Package(cpv=cpv, installed=True,
1347                         metadata=izip(self._db_keys,
1348                         real_vardb.aux_get(cpv, self._db_keys)),
1349                         root_config=root_config,
1350                         type_name="installed")
1351
1352                 try:
1353                         mycounter = long(pkg.metadata["COUNTER"])
1354                 except ValueError:
1355                         mycounter = 0
1356                         pkg.metadata["COUNTER"] = str(mycounter)
1357
1358                 return pkg
1359
1360 def grab_global_updates(portdir):
1361         from portage.update import grab_updates, parse_updates
1362         updpath = os.path.join(portdir, "profiles", "updates")
1363         try:
1364                 rawupdates = grab_updates(updpath)
1365         except portage.exception.DirectoryNotFound:
1366                 rawupdates = []
1367         upd_commands = []
1368         for mykey, mystat, mycontent in rawupdates:
1369                 commands, errors = parse_updates(mycontent)
1370                 upd_commands.extend(commands)
1371         return upd_commands
1372
1373 def perform_global_updates(mycpv, mydb, mycommands):
1374         from portage.update import update_dbentries
1375         aux_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
1376         aux_dict = dict(izip(aux_keys, mydb.aux_get(mycpv, aux_keys)))
1377         updates = update_dbentries(mycommands, aux_dict)
1378         if updates:
1379                 mydb.aux_update(mycpv, updates)
1380
1381 def visible(pkgsettings, pkg):
1382         """
1383         Check if a package is visible. This can raise an InvalidDependString
1384         exception if LICENSE is invalid.
1385         TODO: optionally generate a list of masking reasons
1386         @rtype: Boolean
1387         @returns: True if the package is visible, False otherwise.
1388         """
1389         if not pkg.metadata["SLOT"]:
1390                 return False
1391         if not pkg.installed:
1392                 if not pkgsettings._accept_chost(pkg.cpv, pkg.metadata):
1393                         return False
1394         eapi = pkg.metadata["EAPI"]
1395         if not portage.eapi_is_supported(eapi):
1396                 return False
1397         if not pkg.installed:
1398                 if portage._eapi_is_deprecated(eapi):
1399                         return False
1400                 if pkgsettings._getMissingKeywords(pkg.cpv, pkg.metadata):
1401                         return False
1402         if pkgsettings._getMaskAtom(pkg.cpv, pkg.metadata):
1403                 return False
1404         if pkgsettings._getProfileMaskAtom(pkg.cpv, pkg.metadata):
1405                 return False
1406         try:
1407                 if pkgsettings._getMissingLicenses(pkg.cpv, pkg.metadata):
1408                         return False
1409         except portage.exception.InvalidDependString:
1410                 return False
1411         return True
1412
1413 def get_masking_status(pkg, pkgsettings, root_config):
1414
1415         mreasons = portage.getmaskingstatus(
1416                 pkg, settings=pkgsettings,
1417                 portdb=root_config.trees["porttree"].dbapi)
1418
1419         if not pkg.installed:
1420                 if not pkgsettings._accept_chost(pkg.cpv, pkg.metadata):
1421                         mreasons.append("CHOST: %s" % \
1422                                 pkg.metadata["CHOST"])
1423
1424         if not pkg.metadata["SLOT"]:
1425                 mreasons.append("invalid: SLOT is undefined")
1426
1427         return mreasons
1428
1429 def get_mask_info(root_config, cpv, pkgsettings,
1430         db, pkg_type, built, installed, db_keys):
1431         eapi_masked = False
1432         try:
1433                 metadata = dict(izip(db_keys,
1434                         db.aux_get(cpv, db_keys)))
1435         except KeyError:
1436                 metadata = None
1437         if metadata and not built:
1438                 pkgsettings.setcpv(cpv, mydb=metadata)
1439                 metadata["USE"] = pkgsettings["PORTAGE_USE"]
1440                 metadata['CHOST'] = pkgsettings.get('CHOST', '')
1441         if metadata is None:
1442                 mreasons = ["corruption"]
1443         else:
1444                 eapi = metadata['EAPI']
1445                 if eapi[:1] == '-':
1446                         eapi = eapi[1:]
1447                 if not portage.eapi_is_supported(eapi):
1448                         mreasons = ['EAPI %s' % eapi]
1449                 else:
1450                         pkg = Package(type_name=pkg_type, root_config=root_config,
1451                                 cpv=cpv, built=built, installed=installed, metadata=metadata)
1452                         mreasons = get_masking_status(pkg, pkgsettings, root_config)
1453         return metadata, mreasons
1454
1455 def show_masked_packages(masked_packages):
1456         shown_licenses = set()
1457         shown_comments = set()
1458         # Maybe there is both an ebuild and a binary. Only
1459         # show one of them to avoid redundant appearance.
1460         shown_cpvs = set()
1461         have_eapi_mask = False
1462         for (root_config, pkgsettings, cpv,
1463                 metadata, mreasons) in masked_packages:
1464                 if cpv in shown_cpvs:
1465                         continue
1466                 shown_cpvs.add(cpv)
1467                 comment, filename = None, None
1468                 if "package.mask" in mreasons:
1469                         comment, filename = \
1470                                 portage.getmaskingreason(
1471                                 cpv, metadata=metadata,
1472                                 settings=pkgsettings,
1473                                 portdb=root_config.trees["porttree"].dbapi,
1474                                 return_location=True)
1475                 missing_licenses = []
1476                 if metadata:
1477                         if not portage.eapi_is_supported(metadata["EAPI"]):
1478                                 have_eapi_mask = True
1479                         try:
1480                                 missing_licenses = \
1481                                         pkgsettings._getMissingLicenses(
1482                                                 cpv, metadata)
1483                         except portage.exception.InvalidDependString:
1484                                 # This will have already been reported
1485                                 # above via mreasons.
1486                                 pass
1487
1488                 print "- "+cpv+" (masked by: "+", ".join(mreasons)+")"
1489                 if comment and comment not in shown_comments:
1490                         print filename+":"
1491                         print comment
1492                         shown_comments.add(comment)
1493                 portdb = root_config.trees["porttree"].dbapi
1494                 for l in missing_licenses:
1495                         l_path = portdb.findLicensePath(l)
1496                         if l in shown_licenses:
1497                                 continue
1498                         msg = ("A copy of the '%s' license" + \
1499                         " is located at '%s'.") % (l, l_path)
1500                         print msg
1501                         print
1502                         shown_licenses.add(l)
1503         return have_eapi_mask
1504
1505 class Task(SlotObject):
1506         __slots__ = ("_hash_key", "_hash_value")
1507
1508         def _get_hash_key(self):
1509                 hash_key = getattr(self, "_hash_key", None)
1510                 if hash_key is None:
1511                         raise NotImplementedError(self)
1512                 return hash_key
1513
1514         def __eq__(self, other):
1515                 return self._get_hash_key() == other
1516
1517         def __ne__(self, other):
1518                 return self._get_hash_key() != other
1519
1520         def __hash__(self):
1521                 hash_value = getattr(self, "_hash_value", None)
1522                 if hash_value is None:
1523                         self._hash_value = hash(self._get_hash_key())
1524                 return self._hash_value
1525
1526         def __len__(self):
1527                 return len(self._get_hash_key())
1528
1529         def __getitem__(self, key):
1530                 return self._get_hash_key()[key]
1531
1532         def __iter__(self):
1533                 return iter(self._get_hash_key())
1534
1535         def __contains__(self, key):
1536                 return key in self._get_hash_key()
1537
1538         def __str__(self):
1539                 return str(self._get_hash_key())
1540
1541 class Blocker(Task):
1542
1543         __hash__ = Task.__hash__
1544         __slots__ = ("root", "atom", "cp", "eapi", "satisfied")
1545
1546         def __init__(self, **kwargs):
1547                 Task.__init__(self, **kwargs)
1548                 self.cp = portage.dep_getkey(self.atom)
1549
1550         def _get_hash_key(self):
1551                 hash_key = getattr(self, "_hash_key", None)
1552                 if hash_key is None:
1553                         self._hash_key = \
1554                                 ("blocks", self.root, self.atom, self.eapi)
1555                 return self._hash_key
1556
1557 class Package(Task):
1558
1559         __hash__ = Task.__hash__
1560         __slots__ = ("built", "cpv", "depth",
1561                 "installed", "metadata", "onlydeps", "operation",
1562                 "root_config", "type_name",
1563                 "category", "counter", "cp", "cpv_split",
1564                 "inherited", "iuse", "mtime",
1565                 "pf", "pv_split", "root", "slot", "slot_atom", "use")
1566
1567         metadata_keys = [
1568                 "CHOST", "COUNTER", "DEPEND", "EAPI",
1569                 "INHERITED", "IUSE", "KEYWORDS",
1570                 "LICENSE", "PDEPEND", "PROVIDE", "RDEPEND",
1571                 "repository", "PROPERTIES", "RESTRICT", "SLOT", "USE", "_mtime_"]
1572
1573         def __init__(self, **kwargs):
1574                 Task.__init__(self, **kwargs)
1575                 self.root = self.root_config.root
1576                 self.metadata = _PackageMetadataWrapper(self, self.metadata)
1577                 self.cp = portage.cpv_getkey(self.cpv)
1578                 slot = self.slot
1579                 if not slot:
1580                         # Avoid an InvalidAtom exception when creating slot_atom.
1581                         # This package instance will be masked due to empty SLOT.
1582                         slot = '0'
1583                 self.slot_atom = portage.dep.Atom("%s:%s" % (self.cp, slot))
1584                 self.category, self.pf = portage.catsplit(self.cpv)
1585                 self.cpv_split = portage.catpkgsplit(self.cpv)
1586                 self.pv_split = self.cpv_split[1:]
1587
1588         class _use(object):
1589
1590                 __slots__ = ("__weakref__", "enabled")
1591
1592                 def __init__(self, use):
1593                         self.enabled = frozenset(use)
1594
1595         class _iuse(object):
1596
1597                 __slots__ = ("__weakref__", "all", "enabled", "disabled", "iuse_implicit", "regex", "tokens")
1598
1599                 def __init__(self, tokens, iuse_implicit):
1600                         self.tokens = tuple(tokens)
1601                         self.iuse_implicit = iuse_implicit
1602                         enabled = []
1603                         disabled = []
1604                         other = []
1605                         for x in tokens:
1606                                 prefix = x[:1]
1607                                 if prefix == "+":
1608                                         enabled.append(x[1:])
1609                                 elif prefix == "-":
1610                                         disabled.append(x[1:])
1611                                 else:
1612                                         other.append(x)
1613                         self.enabled = frozenset(enabled)
1614                         self.disabled = frozenset(disabled)
1615                         self.all = frozenset(chain(enabled, disabled, other))
1616
1617                 def __getattribute__(self, name):
1618                         if name == "regex":
1619                                 try:
1620                                         return object.__getattribute__(self, "regex")
1621                                 except AttributeError:
1622                                         all = object.__getattribute__(self, "all")
1623                                         iuse_implicit = object.__getattribute__(self, "iuse_implicit")
1624                                         # Escape anything except ".*" which is supposed
1625                                         # to pass through from _get_implicit_iuse()
1626                                         regex = (re.escape(x) for x in chain(all, iuse_implicit))
1627                                         regex = "^(%s)$" % "|".join(regex)
1628                                         regex = regex.replace("\\.\\*", ".*")
1629                                         self.regex = re.compile(regex)
1630                         return object.__getattribute__(self, name)
1631
1632         def _get_hash_key(self):
1633                 hash_key = getattr(self, "_hash_key", None)
1634                 if hash_key is None:
1635                         if self.operation is None:
1636                                 self.operation = "merge"
1637                                 if self.onlydeps or self.installed:
1638                                         self.operation = "nomerge"
1639                         self._hash_key = \
1640                                 (self.type_name, self.root, self.cpv, self.operation)
1641                 return self._hash_key
1642
1643         def __lt__(self, other):
1644                 if other.cp != self.cp:
1645                         return False
1646                 if portage.pkgcmp(self.pv_split, other.pv_split) < 0:
1647                         return True
1648                 return False
1649
1650         def __le__(self, other):
1651                 if other.cp != self.cp:
1652                         return False
1653                 if portage.pkgcmp(self.pv_split, other.pv_split) <= 0:
1654                         return True
1655                 return False
1656
1657         def __gt__(self, other):
1658                 if other.cp != self.cp:
1659                         return False
1660                 if portage.pkgcmp(self.pv_split, other.pv_split) > 0:
1661                         return True
1662                 return False
1663
1664         def __ge__(self, other):
1665                 if other.cp != self.cp:
1666                         return False
1667                 if portage.pkgcmp(self.pv_split, other.pv_split) >= 0:
1668                         return True
1669                 return False
1670
1671 _all_metadata_keys = set(x for x in portage.auxdbkeys \
1672         if not x.startswith("UNUSED_"))
1673 _all_metadata_keys.discard("CDEPEND")
1674 _all_metadata_keys.update(Package.metadata_keys)
1675
1676 from portage.cache.mappings import slot_dict_class
1677 _PackageMetadataWrapperBase = slot_dict_class(_all_metadata_keys)
1678
1679 class _PackageMetadataWrapper(_PackageMetadataWrapperBase):
1680         """
1681         Detect metadata updates and synchronize Package attributes.
1682         """
1683
1684         __slots__ = ("_pkg",)
1685         _wrapped_keys = frozenset(
1686                 ["COUNTER", "INHERITED", "IUSE", "SLOT", "USE", "_mtime_"])
1687
1688         def __init__(self, pkg, metadata):
1689                 _PackageMetadataWrapperBase.__init__(self)
1690                 self._pkg = pkg
1691                 self.update(metadata)
1692
1693         def __setitem__(self, k, v):
1694                 _PackageMetadataWrapperBase.__setitem__(self, k, v)
1695                 if k in self._wrapped_keys:
1696                         getattr(self, "_set_" + k.lower())(k, v)
1697
1698         def _set_inherited(self, k, v):
1699                 if isinstance(v, basestring):
1700                         v = frozenset(v.split())
1701                 self._pkg.inherited = v
1702
1703         def _set_iuse(self, k, v):
1704                 self._pkg.iuse = self._pkg._iuse(
1705                         v.split(), self._pkg.root_config.iuse_implicit)
1706
1707         def _set_slot(self, k, v):
1708                 self._pkg.slot = v
1709
1710         def _set_use(self, k, v):
1711                 self._pkg.use = self._pkg._use(v.split())
1712
1713         def _set_counter(self, k, v):
1714                 if isinstance(v, basestring):
1715                         try:
1716                                 v = long(v.strip())
1717                         except ValueError:
1718                                 v = 0
1719                 self._pkg.counter = v
1720
1721         def _set__mtime_(self, k, v):
1722                 if isinstance(v, basestring):
1723                         try:
1724                                 v = long(v.strip())
1725                         except ValueError:
1726                                 v = 0
1727                 self._pkg.mtime = v
1728
1729 class EbuildFetchonly(SlotObject):
1730
1731         __slots__ = ("fetch_all", "pkg", "pretend", "settings")
1732
1733         def execute(self):
1734                 settings = self.settings
1735                 pkg = self.pkg
1736                 portdb = pkg.root_config.trees["porttree"].dbapi
1737                 ebuild_path = portdb.findname(pkg.cpv)
1738                 settings.setcpv(pkg)
1739                 debug = settings.get("PORTAGE_DEBUG") == "1"
1740                 use_cache = 1 # always true
1741                 portage.doebuild_environment(ebuild_path, "fetch",
1742                         settings["ROOT"], settings, debug, use_cache, portdb)
1743                 restrict_fetch = 'fetch' in settings['PORTAGE_RESTRICT'].split()
1744
1745                 if restrict_fetch:
1746                         rval = self._execute_with_builddir()
1747                 else:
1748                         rval = portage.doebuild(ebuild_path, "fetch",
1749                                 settings["ROOT"], settings, debug=debug,
1750                                 listonly=self.pretend, fetchonly=1, fetchall=self.fetch_all,
1751                                 mydbapi=portdb, tree="porttree")
1752
1753                         if rval != os.EX_OK:
1754                                 msg = "Fetch failed for '%s'" % (pkg.cpv,)
1755                                 eerror(msg, phase="unpack", key=pkg.cpv)
1756
1757                 return rval
1758
1759         def _execute_with_builddir(self):
1760                 # To spawn pkg_nofetch requires PORTAGE_BUILDDIR for
1761                 # ensuring sane $PWD (bug #239560) and storing elog
1762                 # messages. Use a private temp directory, in order
1763                 # to avoid locking the main one.
1764                 settings = self.settings
1765                 global_tmpdir = settings["PORTAGE_TMPDIR"]
1766                 from tempfile import mkdtemp
1767                 try:
1768                         private_tmpdir = mkdtemp("", "._portage_fetch_.", global_tmpdir)
1769                 except OSError, e:
1770                         if e.errno != portage.exception.PermissionDenied.errno:
1771                                 raise
1772                         raise portage.exception.PermissionDenied(global_tmpdir)
1773                 settings["PORTAGE_TMPDIR"] = private_tmpdir
1774                 settings.backup_changes("PORTAGE_TMPDIR")
1775                 try:
1776                         retval = self._execute()
1777                 finally:
1778                         settings["PORTAGE_TMPDIR"] = global_tmpdir
1779                         settings.backup_changes("PORTAGE_TMPDIR")
1780                         shutil.rmtree(private_tmpdir)
1781                 return retval
1782
1783         def _execute(self):
1784                 settings = self.settings
1785                 pkg = self.pkg
1786                 root_config = pkg.root_config
1787                 portdb = root_config.trees["porttree"].dbapi
1788                 ebuild_path = portdb.findname(pkg.cpv)
1789                 debug = settings.get("PORTAGE_DEBUG") == "1"
1790                 portage.prepare_build_dirs(self.pkg.root, self.settings, 0)
1791
1792                 retval = portage.doebuild(ebuild_path, "fetch",
1793                         self.settings["ROOT"], self.settings, debug=debug,
1794                         listonly=self.pretend, fetchonly=1, fetchall=self.fetch_all,
1795                         mydbapi=portdb, tree="porttree")
1796
1797                 if retval != os.EX_OK:
1798                         msg = "Fetch failed for '%s'" % (pkg.cpv,)
1799                         eerror(msg, phase="unpack", key=pkg.cpv)
1800
1801                 portage.elog.elog_process(self.pkg.cpv, self.settings)
1802                 return retval
1803
1804 class PollConstants(object):
1805
1806         """
1807         Provides POLL* constants that are equivalent to those from the
1808         select module, for use by PollSelectAdapter.
1809         """
1810
1811         names = ("POLLIN", "POLLPRI", "POLLOUT", "POLLERR", "POLLHUP", "POLLNVAL")
1812         v = 1
1813         for k in names:
1814                 locals()[k] = getattr(select, k, v)
1815                 v *= 2
1816         del k, v
1817
1818 class AsynchronousTask(SlotObject):
1819         """
1820         Subclasses override _wait() and _poll() so that calls
1821         to public methods can be wrapped for implementing
1822         hooks such as exit listener notification.
1823
1824         Sublasses should call self.wait() to notify exit listeners after
1825         the task is complete and self.returncode has been set.
1826         """
1827
1828         __slots__ = ("background", "cancelled", "returncode") + \
1829                 ("_exit_listeners", "_exit_listener_stack", "_start_listeners")
1830
1831         def start(self):
1832                 """
1833                 Start an asynchronous task and then return as soon as possible.
1834                 """
1835                 self._start_hook()
1836                 self._start()
1837
1838         def _start(self):
1839                 raise NotImplementedError(self)
1840
1841         def isAlive(self):
1842                 return self.returncode is None
1843
1844         def poll(self):
1845                 self._wait_hook()
1846                 return self._poll()
1847
1848         def _poll(self):
1849                 return self.returncode
1850
1851         def wait(self):
1852                 if self.returncode is None:
1853                         self._wait()
1854                 self._wait_hook()
1855                 return self.returncode
1856
1857         def _wait(self):
1858                 return self.returncode
1859
1860         def cancel(self):
1861                 self.cancelled = True
1862                 self.wait()
1863
1864         def addStartListener(self, f):
1865                 """
1866                 The function will be called with one argument, a reference to self.
1867                 """
1868                 if self._start_listeners is None:
1869                         self._start_listeners = []
1870                 self._start_listeners.append(f)
1871
1872         def removeStartListener(self, f):
1873                 if self._start_listeners is None:
1874                         return
1875                 self._start_listeners.remove(f)
1876
1877         def _start_hook(self):
1878                 if self._start_listeners is not None:
1879                         start_listeners = self._start_listeners
1880                         self._start_listeners = None
1881
1882                         for f in start_listeners:
1883                                 f(self)
1884
1885         def addExitListener(self, f):
1886                 """
1887                 The function will be called with one argument, a reference to self.
1888                 """
1889                 if self._exit_listeners is None:
1890                         self._exit_listeners = []
1891                 self._exit_listeners.append(f)
1892
1893         def removeExitListener(self, f):
1894                 if self._exit_listeners is None:
1895                         if self._exit_listener_stack is not None:
1896                                 self._exit_listener_stack.remove(f)
1897                         return
1898                 self._exit_listeners.remove(f)
1899
1900         def _wait_hook(self):
1901                 """
1902                 Call this method after the task completes, just before returning
1903                 the returncode from wait() or poll(). This hook is
1904                 used to trigger exit listeners when the returncode first
1905                 becomes available.
1906                 """
1907                 if self.returncode is not None and \
1908                         self._exit_listeners is not None:
1909
1910                         # This prevents recursion, in case one of the
1911                         # exit handlers triggers this method again by
1912                         # calling wait(). Use a stack that gives
1913                         # removeExitListener() an opportunity to consume
1914                         # listeners from the stack, before they can get
1915                         # called below. This is necessary because a call
1916                         # to one exit listener may result in a call to
1917                         # removeExitListener() for another listener on
1918                         # the stack. That listener needs to be removed
1919                         # from the stack since it would be inconsistent
1920                         # to call it after it has been been passed into
1921                         # removeExitListener().
1922                         self._exit_listener_stack = self._exit_listeners
1923                         self._exit_listeners = None
1924
1925                         self._exit_listener_stack.reverse()
1926                         while self._exit_listener_stack:
1927                                 self._exit_listener_stack.pop()(self)
1928
1929 class AbstractPollTask(AsynchronousTask):
1930
1931         __slots__ = ("scheduler",) + \
1932                 ("_registered",)
1933
1934         _bufsize = 4096
1935         _exceptional_events = PollConstants.POLLERR | PollConstants.POLLNVAL
1936         _registered_events = PollConstants.POLLIN | PollConstants.POLLHUP | \
1937                 _exceptional_events
1938
1939         def _unregister(self):
1940                 raise NotImplementedError(self)
1941
1942         def _unregister_if_appropriate(self, event):
1943                 if self._registered:
1944                         if event & self._exceptional_events:
1945                                 self._unregister()
1946                                 self.cancel()
1947                         elif event & PollConstants.POLLHUP:
1948                                 self._unregister()
1949                                 self.wait()
1950
1951 class PipeReader(AbstractPollTask):
1952
1953         """
1954         Reads output from one or more files and saves it in memory,
1955         for retrieval via the getvalue() method. This is driven by
1956         the scheduler's poll() loop, so it runs entirely within the
1957         current process.
1958         """
1959
1960         __slots__ = ("input_files",) + \
1961                 ("_read_data", "_reg_ids")
1962
1963         def _start(self):
1964                 self._reg_ids = set()
1965                 self._read_data = []
1966                 for k, f in self.input_files.iteritems():
1967                         fcntl.fcntl(f.fileno(), fcntl.F_SETFL,
1968                                 fcntl.fcntl(f.fileno(), fcntl.F_GETFL) | os.O_NONBLOCK)
1969                         self._reg_ids.add(self.scheduler.register(f.fileno(),
1970                                 self._registered_events, self._output_handler))
1971                 self._registered = True
1972
1973         def isAlive(self):
1974                 return self._registered
1975
1976         def cancel(self):
1977                 if self.returncode is None:
1978                         self.returncode = 1
1979                         self.cancelled = True
1980                 self.wait()
1981
1982         def _wait(self):
1983                 if self.returncode is not None:
1984                         return self.returncode
1985
1986                 if self._registered:
1987                         self.scheduler.schedule(self._reg_ids)
1988                         self._unregister()
1989
1990                 self.returncode = os.EX_OK
1991                 return self.returncode
1992
1993         def getvalue(self):
1994                 """Retrieve the entire contents"""
1995                 if sys.hexversion >= 0x3000000:
1996                         return bytes().join(self._read_data)
1997                 return "".join(self._read_data)
1998
1999         def close(self):
2000                 """Free the memory buffer."""
2001                 self._read_data = None
2002
2003         def _output_handler(self, fd, event):
2004
2005                 if event & PollConstants.POLLIN:
2006
2007                         for f in self.input_files.itervalues():
2008                                 if fd == f.fileno():
2009                                         break
2010
2011                         buf = array.array('B')
2012                         try:
2013                                 buf.fromfile(f, self._bufsize)
2014                         except EOFError:
2015                                 pass
2016
2017                         if buf:
2018                                 self._read_data.append(buf.tostring())
2019                         else:
2020                                 self._unregister()
2021                                 self.wait()
2022
2023                 self._unregister_if_appropriate(event)
2024                 return self._registered
2025
2026         def _unregister(self):
2027                 """
2028                 Unregister from the scheduler and close open files.
2029                 """
2030
2031                 self._registered = False
2032
2033                 if self._reg_ids is not None:
2034                         for reg_id in self._reg_ids:
2035                                 self.scheduler.unregister(reg_id)
2036                         self._reg_ids = None
2037
2038                 if self.input_files is not None:
2039                         for f in self.input_files.itervalues():
2040                                 f.close()
2041                         self.input_files = None
2042
2043 class CompositeTask(AsynchronousTask):
2044
2045         __slots__ = ("scheduler",) + ("_current_task",)
2046
2047         def isAlive(self):
2048                 return self._current_task is not None
2049
2050         def cancel(self):
2051                 self.cancelled = True
2052                 if self._current_task is not None:
2053                         self._current_task.cancel()
2054
2055         def _poll(self):
2056                 """
2057                 This does a loop calling self._current_task.poll()
2058                 repeatedly as long as the value of self._current_task
2059                 keeps changing. It calls poll() a maximum of one time
2060                 for a given self._current_task instance. This is useful
2061                 since calling poll() on a task can trigger advance to
2062                 the next task could eventually lead to the returncode
2063                 being set in cases when polling only a single task would
2064                 not have the same effect.
2065                 """
2066
2067                 prev = None
2068                 while True:
2069                         task = self._current_task
2070                         if task is None or task is prev:
2071                                 # don't poll the same task more than once
2072                                 break
2073                         task.poll()
2074                         prev = task
2075
2076                 return self.returncode
2077
2078         def _wait(self):
2079
2080                 prev = None
2081                 while True:
2082                         task = self._current_task
2083                         if task is None:
2084                                 # don't wait for the same task more than once
2085                                 break
2086                         if task is prev:
2087                                 # Before the task.wait() method returned, an exit
2088                                 # listener should have set self._current_task to either
2089                                 # a different task or None. Something is wrong.
2090                                 raise AssertionError("self._current_task has not " + \
2091                                         "changed since calling wait", self, task)
2092                         task.wait()
2093                         prev = task
2094
2095                 return self.returncode
2096
2097         def _assert_current(self, task):
2098                 """
2099                 Raises an AssertionError if the given task is not the
2100                 same one as self._current_task. This can be useful
2101                 for detecting bugs.
2102                 """
2103                 if task is not self._current_task:
2104                         raise AssertionError("Unrecognized task: %s" % (task,))
2105
2106         def _default_exit(self, task):
2107                 """
2108                 Calls _assert_current() on the given task and then sets the
2109                 composite returncode attribute if task.returncode != os.EX_OK.
2110                 If the task failed then self._current_task will be set to None.
2111                 Subclasses can use this as a generic task exit callback.
2112
2113                 @rtype: int
2114                 @returns: The task.returncode attribute.
2115                 """
2116                 self._assert_current(task)
2117                 if task.returncode != os.EX_OK:
2118                         self.returncode = task.returncode
2119                         self._current_task = None
2120                 return task.returncode
2121
2122         def _final_exit(self, task):
2123                 """
2124                 Assumes that task is the final task of this composite task.
2125                 Calls _default_exit() and sets self.returncode to the task's
2126                 returncode and sets self._current_task to None.
2127                 """
2128                 self._default_exit(task)
2129                 self._current_task = None
2130                 self.returncode = task.returncode
2131                 return self.returncode
2132
2133         def _default_final_exit(self, task):
2134                 """
2135                 This calls _final_exit() and then wait().
2136
2137                 Subclasses can use this as a generic final task exit callback.
2138
2139                 """
2140                 self._final_exit(task)
2141                 return self.wait()
2142
2143         def _start_task(self, task, exit_handler):
2144                 """
2145                 Register exit handler for the given task, set it
2146                 as self._current_task, and call task.start().
2147
2148                 Subclasses can use this as a generic way to start
2149                 a task.
2150
2151                 """
2152                 task.addExitListener(exit_handler)
2153                 self._current_task = task
2154                 task.start()
2155
2156 class TaskSequence(CompositeTask):
2157         """
2158         A collection of tasks that executes sequentially. Each task
2159         must have a addExitListener() method that can be used as
2160         a means to trigger movement from one task to the next.
2161         """
2162
2163         __slots__ = ("_task_queue",)
2164
2165         def __init__(self, **kwargs):
2166                 AsynchronousTask.__init__(self, **kwargs)
2167                 self._task_queue = deque()
2168
2169         def add(self, task):
2170                 self._task_queue.append(task)
2171
2172         def _start(self):
2173                 self._start_next_task()
2174
2175         def cancel(self):
2176                 self._task_queue.clear()
2177                 CompositeTask.cancel(self)
2178
2179         def _start_next_task(self):
2180                 self._start_task(self._task_queue.popleft(),
2181                         self._task_exit_handler)
2182
2183         def _task_exit_handler(self, task):
2184                 if self._default_exit(task) != os.EX_OK:
2185                         self.wait()
2186                 elif self._task_queue:
2187                         self._start_next_task()
2188                 else:
2189                         self._final_exit(task)
2190                         self.wait()
2191
2192 class SubProcess(AbstractPollTask):
2193
2194         __slots__ = ("pid",) + \
2195                 ("_files", "_reg_id")
2196
2197         # A file descriptor is required for the scheduler to monitor changes from
2198         # inside a poll() loop. When logging is not enabled, create a pipe just to
2199         # serve this purpose alone.
2200         _dummy_pipe_fd = 9
2201
2202         def _poll(self):
2203                 if self.returncode is not None:
2204                         return self.returncode
2205                 if self.pid is None:
2206                         return self.returncode
2207                 if self._registered:
2208                         return self.returncode
2209
2210                 try:
2211                         retval = os.waitpid(self.pid, os.WNOHANG)
2212                 except OSError, e:
2213                         if e.errno != errno.ECHILD:
2214                                 raise
2215                         del e
2216                         retval = (self.pid, 1)
2217
2218                 if retval == (0, 0):
2219                         return None
2220                 self._set_returncode(retval)
2221                 return self.returncode
2222
2223         def cancel(self):
2224                 if self.isAlive():
2225                         try:
2226                                 os.kill(self.pid, signal.SIGTERM)
2227                         except OSError, e:
2228                                 if e.errno != errno.ESRCH:
2229                                         raise
2230                                 del e
2231
2232                 self.cancelled = True
2233                 if self.pid is not None:
2234                         self.wait()
2235                 return self.returncode
2236
2237         def isAlive(self):
2238                 return self.pid is not None and \
2239                         self.returncode is None
2240
2241         def _wait(self):
2242
2243                 if self.returncode is not None:
2244                         return self.returncode
2245
2246                 if self._registered:
2247                         self.scheduler.schedule(self._reg_id)
2248                         self._unregister()
2249                         if self.returncode is not None:
2250                                 return self.returncode
2251
2252                 try:
2253                         wait_retval = os.waitpid(self.pid, 0)
2254                 except OSError, e:
2255                         if e.errno != errno.ECHILD:
2256                                 raise
2257                         del e
2258                         self._set_returncode((self.pid, 1))
2259                 else:
2260                         self._set_returncode(wait_retval)
2261
2262                 return self.returncode
2263
2264         def _unregister(self):
2265                 """
2266                 Unregister from the scheduler and close open files.
2267                 """
2268
2269                 self._registered = False
2270
2271                 if self._reg_id is not None:
2272                         self.scheduler.unregister(self._reg_id)
2273                         self._reg_id = None
2274
2275                 if self._files is not None:
2276                         for f in self._files.itervalues():
2277                                 f.close()
2278                         self._files = None
2279
2280         def _set_returncode(self, wait_retval):
2281
2282                 retval = wait_retval[1]
2283
2284                 if retval != os.EX_OK:
2285                         if retval & 0xff:
2286                                 retval = (retval & 0xff) << 8
2287                         else:
2288                                 retval = retval >> 8
2289
2290                 self.returncode = retval
2291
2292 class SpawnProcess(SubProcess):
2293
2294         """
2295         Constructor keyword args are passed into portage.process.spawn().
2296         The required "args" keyword argument will be passed as the first
2297         spawn() argument.
2298         """
2299
2300         _spawn_kwarg_names = ("env", "opt_name", "fd_pipes",
2301                 "uid", "gid", "groups", "umask", "logfile",
2302                 "path_lookup", "pre_exec")
2303
2304         __slots__ = ("args",) + \
2305                 _spawn_kwarg_names
2306
2307         _file_names = ("log", "process", "stdout")
2308         _files_dict = slot_dict_class(_file_names, prefix="")
2309
2310         def _start(self):
2311
2312                 if self.cancelled:
2313                         return
2314
2315                 if self.fd_pipes is None:
2316                         self.fd_pipes = {}
2317                 fd_pipes = self.fd_pipes
2318                 fd_pipes.setdefault(0, sys.stdin.fileno())
2319                 fd_pipes.setdefault(1, sys.stdout.fileno())
2320                 fd_pipes.setdefault(2, sys.stderr.fileno())
2321
2322                 # flush any pending output
2323                 for fd in fd_pipes.itervalues():
2324                         if fd == sys.stdout.fileno():
2325                                 sys.stdout.flush()
2326                         if fd == sys.stderr.fileno():
2327                                 sys.stderr.flush()
2328
2329                 logfile = self.logfile
2330                 self._files = self._files_dict()
2331                 files = self._files
2332
2333                 master_fd, slave_fd = self._pipe(fd_pipes)
2334                 fcntl.fcntl(master_fd, fcntl.F_SETFL,
2335                         fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
2336
2337                 null_input = None
2338                 fd_pipes_orig = fd_pipes.copy()
2339                 if self.background:
2340                         # TODO: Use job control functions like tcsetpgrp() to control
2341                         # access to stdin. Until then, use /dev/null so that any
2342                         # attempts to read from stdin will immediately return EOF
2343                         # instead of blocking indefinitely.
2344                         null_input = open('/dev/null', 'rb')
2345                         fd_pipes[0] = null_input.fileno()
2346                 else:
2347                         fd_pipes[0] = fd_pipes_orig[0]
2348
2349                 files.process = os.fdopen(master_fd, 'rb')
2350                 if logfile is not None:
2351
2352                         fd_pipes[1] = slave_fd
2353                         fd_pipes[2] = slave_fd
2354
2355                         files.log = open(logfile, mode='ab')
2356                         portage.util.apply_secpass_permissions(logfile,
2357                                 uid=portage.portage_uid, gid=portage.portage_gid,
2358                                 mode=0660)
2359
2360                         if not self.background:
2361                                 files.stdout = os.fdopen(os.dup(fd_pipes_orig[1]), 'wb')
2362
2363                         output_handler = self._output_handler
2364
2365                 else:
2366
2367                         # Create a dummy pipe so the scheduler can monitor
2368                         # the process from inside a poll() loop.
2369                         fd_pipes[self._dummy_pipe_fd] = slave_fd
2370                         if self.background:
2371                                 fd_pipes[1] = slave_fd
2372                                 fd_pipes[2] = slave_fd
2373                         output_handler = self._dummy_handler
2374
2375                 kwargs = {}
2376                 for k in self._spawn_kwarg_names:
2377                         v = getattr(self, k)
2378                         if v is not None:
2379                                 kwargs[k] = v
2380
2381                 kwargs["fd_pipes"] = fd_pipes
2382                 kwargs["returnpid"] = True
2383                 kwargs.pop("logfile", None)
2384
2385                 self._reg_id = self.scheduler.register(files.process.fileno(),
2386                         self._registered_events, output_handler)
2387                 self._registered = True
2388
2389                 retval = self._spawn(self.args, **kwargs)
2390
2391                 os.close(slave_fd)
2392                 if null_input is not None:
2393                         null_input.close()
2394
2395                 if isinstance(retval, int):
2396                         # spawn failed
2397                         self._unregister()
2398                         self.returncode = retval
2399                         self.wait()
2400                         return
2401
2402                 self.pid = retval[0]
2403                 portage.process.spawned_pids.remove(self.pid)
2404
2405         def _pipe(self, fd_pipes):
2406                 """
2407                 @type fd_pipes: dict
2408                 @param fd_pipes: pipes from which to copy terminal size if desired.
2409                 """
2410                 return os.pipe()
2411
2412         def _spawn(self, args, **kwargs):
2413                 return portage.process.spawn(args, **kwargs)
2414
2415         def _output_handler(self, fd, event):
2416
2417                 if event & PollConstants.POLLIN:
2418
2419                         files = self._files
2420                         buf = array.array('B')
2421                         try:
2422                                 buf.fromfile(files.process, self._bufsize)
2423                         except EOFError:
2424                                 pass
2425
2426                         if buf:
2427                                 if not self.background:
2428                                         buf.tofile(files.stdout)
2429                                         files.stdout.flush()
2430                                 buf.tofile(files.log)
2431                                 files.log.flush()
2432                         else:
2433                                 self._unregister()
2434                                 self.wait()
2435
2436                 self._unregister_if_appropriate(event)
2437                 return self._registered
2438
2439         def _dummy_handler(self, fd, event):
2440                 """
2441                 This method is mainly interested in detecting EOF, since
2442                 the only purpose of the pipe is to allow the scheduler to
2443                 monitor the process from inside a poll() loop.
2444                 """
2445
2446                 if event & PollConstants.POLLIN:
2447
2448                         buf = array.array('B')
2449                         try:
2450                                 buf.fromfile(self._files.process, self._bufsize)
2451                         except EOFError:
2452                                 pass
2453
2454                         if buf:
2455                                 pass
2456                         else:
2457                                 self._unregister()
2458                                 self.wait()
2459
2460                 self._unregister_if_appropriate(event)
2461                 return self._registered
2462
2463 class MiscFunctionsProcess(SpawnProcess):
2464         """
2465         Spawns misc-functions.sh with an existing ebuild environment.
2466         """
2467
2468         __slots__ = ("commands", "phase", "pkg", "settings")
2469
2470         def _start(self):
2471                 settings = self.settings
2472                 settings.pop("EBUILD_PHASE", None)
2473                 portage_bin_path = settings["PORTAGE_BIN_PATH"]
2474                 misc_sh_binary = os.path.join(portage_bin_path,
2475                         os.path.basename(portage.const.MISC_SH_BINARY))
2476
2477                 self.args = [portage._shell_quote(misc_sh_binary)] + self.commands
2478                 self.logfile = settings.get("PORTAGE_LOG_FILE")
2479
2480                 portage._doebuild_exit_status_unlink(
2481                         settings.get("EBUILD_EXIT_STATUS_FILE"))
2482
2483                 SpawnProcess._start(self)
2484
2485         def _spawn(self, args, **kwargs):
2486                 settings = self.settings
2487                 debug = settings.get("PORTAGE_DEBUG") == "1"
2488                 return portage.spawn(" ".join(args), settings,
2489                         debug=debug, **kwargs)
2490
2491         def _set_returncode(self, wait_retval):
2492                 SpawnProcess._set_returncode(self, wait_retval)
2493                 self.returncode = portage._doebuild_exit_status_check_and_log(
2494                         self.settings, self.phase, self.returncode)
2495
2496 class EbuildFetcher(SpawnProcess):
2497
2498         __slots__ = ("config_pool", "fetchonly", "fetchall", "pkg", "prefetch") + \
2499                 ("_build_dir",)
2500
2501         def _start(self):
2502
2503                 root_config = self.pkg.root_config
2504                 portdb = root_config.trees["porttree"].dbapi
2505                 ebuild_path = portdb.findname(self.pkg.cpv)
2506                 settings = self.config_pool.allocate()
2507                 settings.setcpv(self.pkg)
2508
2509                 # In prefetch mode, logging goes to emerge-fetch.log and the builddir
2510                 # should not be touched since otherwise it could interfere with
2511                 # another instance of the same cpv concurrently being built for a
2512                 # different $ROOT (currently, builds only cooperate with prefetchers
2513                 # that are spawned for the same $ROOT).
2514                 if not self.prefetch:
2515                         self._build_dir = EbuildBuildDir(pkg=self.pkg, settings=settings)
2516                         self._build_dir.lock()
2517                         self._build_dir.clean_log()
2518                         portage.prepare_build_dirs(self.pkg.root, self._build_dir.settings, 0)
2519                         if self.logfile is None:
2520                                 self.logfile = settings.get("PORTAGE_LOG_FILE")
2521
2522                 phase = "fetch"
2523                 if self.fetchall:
2524                         phase = "fetchall"
2525
2526                 # If any incremental variables have been overridden
2527                 # via the environment, those values need to be passed
2528                 # along here so that they are correctly considered by
2529                 # the config instance in the subproccess.
2530                 fetch_env = os.environ.copy()
2531
2532                 nocolor = settings.get("NOCOLOR")
2533                 if nocolor is not None:
2534                         fetch_env["NOCOLOR"] = nocolor
2535
2536                 fetch_env["PORTAGE_NICENESS"] = "0"
2537                 if self.prefetch:
2538                         fetch_env["PORTAGE_PARALLEL_FETCHONLY"] = "1"
2539
2540                 ebuild_binary = os.path.join(
2541                         settings["PORTAGE_BIN_PATH"], "ebuild")
2542
2543                 fetch_args = [ebuild_binary, ebuild_path, phase]
2544                 debug = settings.get("PORTAGE_DEBUG") == "1"
2545                 if debug:
2546                         fetch_args.append("--debug")
2547
2548                 self.args = fetch_args
2549                 self.env = fetch_env
2550                 SpawnProcess._start(self)
2551
2552         def _pipe(self, fd_pipes):
2553                 """When appropriate, use a pty so that fetcher progress bars,
2554                 like wget has, will work properly."""
2555                 if self.background or not sys.stdout.isatty():
2556                         # When the output only goes to a log file,
2557                         # there's no point in creating a pty.
2558                         return os.pipe()
2559                 stdout_pipe = fd_pipes.get(1)
2560                 got_pty, master_fd, slave_fd = \
2561                         portage._create_pty_or_pipe(copy_term_size=stdout_pipe)
2562                 return (master_fd, slave_fd)
2563
2564         def _set_returncode(self, wait_retval):
2565                 SpawnProcess._set_returncode(self, wait_retval)
2566                 # Collect elog messages that might have been
2567                 # created by the pkg_nofetch phase.
2568                 if self._build_dir is not None:
2569                         # Skip elog messages for prefetch, in order to avoid duplicates.
2570                         if not self.prefetch and self.returncode != os.EX_OK:
2571                                 elog_out = None
2572                                 if self.logfile is not None:
2573                                         if self.background:
2574                                                 elog_out = open(self.logfile, 'a')
2575                                 msg = "Fetch failed for '%s'" % (self.pkg.cpv,)
2576                                 if self.logfile is not None:
2577                                         msg += ", Log file:"
2578                                 eerror(msg, phase="unpack", key=self.pkg.cpv, out=elog_out)
2579                                 if self.logfile is not None:
2580                                         eerror(" '%s'" % (self.logfile,),
2581                                                 phase="unpack", key=self.pkg.cpv, out=elog_out)
2582                                 if elog_out is not None:
2583                                         elog_out.close()
2584                         if not self.prefetch:
2585                                 portage.elog.elog_process(self.pkg.cpv, self._build_dir.settings)
2586                         features = self._build_dir.settings.features
2587                         if self.returncode == os.EX_OK:
2588                                 self._build_dir.clean_log()
2589                         self._build_dir.unlock()
2590                         self.config_pool.deallocate(self._build_dir.settings)
2591                         self._build_dir = None
2592
2593 class EbuildBuildDir(SlotObject):
2594
2595         __slots__ = ("dir_path", "pkg", "settings",
2596                 "locked", "_catdir", "_lock_obj")
2597
2598         def __init__(self, **kwargs):
2599                 SlotObject.__init__(self, **kwargs)
2600                 self.locked = False
2601
2602         def lock(self):
2603                 """
2604                 This raises an AlreadyLocked exception if lock() is called
2605                 while a lock is already held. In order to avoid this, call
2606                 unlock() or check whether the "locked" attribute is True
2607                 or False before calling lock().
2608                 """
2609                 if self._lock_obj is not None:
2610                         raise self.AlreadyLocked((self._lock_obj,))
2611
2612                 dir_path = self.dir_path
2613                 if dir_path is None:
2614                         root_config = self.pkg.root_config
2615                         portdb = root_config.trees["porttree"].dbapi
2616                         ebuild_path = portdb.findname(self.pkg.cpv)
2617                         settings = self.settings
2618                         settings.setcpv(self.pkg)
2619                         debug = settings.get("PORTAGE_DEBUG") == "1"
2620                         use_cache = 1 # always true
2621                         portage.doebuild_environment(ebuild_path, "setup", root_config.root,
2622                                 self.settings, debug, use_cache, portdb)
2623                         dir_path = self.settings["PORTAGE_BUILDDIR"]
2624
2625                 catdir = os.path.dirname(dir_path)
2626                 self._catdir = catdir
2627
2628                 portage.util.ensure_dirs(os.path.dirname(catdir),
2629                         gid=portage.portage_gid,
2630                         mode=070, mask=0)
2631                 catdir_lock = None
2632                 try:
2633                         catdir_lock = portage.locks.lockdir(catdir)
2634                         portage.util.ensure_dirs(catdir,
2635                                 gid=portage.portage_gid,
2636                                 mode=070, mask=0)
2637                         self._lock_obj = portage.locks.lockdir(dir_path)
2638                 finally:
2639                         self.locked = self._lock_obj is not None
2640                         if catdir_lock is not None:
2641                                 portage.locks.unlockdir(catdir_lock)
2642
2643         def clean_log(self):
2644                 """Discard existing log."""
2645                 settings = self.settings
2646
2647                 for x in ('.logid', 'temp/build.log'):
2648                         try:
2649                                 os.unlink(os.path.join(settings["PORTAGE_BUILDDIR"], x))
2650                         except OSError:
2651                                 pass
2652
2653         def unlock(self):
2654                 if self._lock_obj is None:
2655                         return
2656
2657                 portage.locks.unlockdir(self._lock_obj)
2658                 self._lock_obj = None
2659                 self.locked = False
2660
2661                 catdir = self._catdir
2662                 catdir_lock = None
2663                 try:
2664                         catdir_lock = portage.locks.lockdir(catdir)
2665                 finally:
2666                         if catdir_lock:
2667                                 try:
2668                                         os.rmdir(catdir)
2669                                 except OSError, e:
2670                                         if e.errno not in (errno.ENOENT,
2671                                                 errno.ENOTEMPTY, errno.EEXIST):
2672                                                 raise
2673                                         del e
2674                                 portage.locks.unlockdir(catdir_lock)
2675
2676         class AlreadyLocked(portage.exception.PortageException):
2677                 pass
2678
2679 class EbuildBuild(CompositeTask):
2680
2681         __slots__ = ("args_set", "config_pool", "find_blockers",
2682                 "ldpath_mtimes", "logger", "opts", "pkg", "pkg_count",
2683                 "prefetcher", "settings", "world_atom") + \
2684                 ("_build_dir", "_buildpkg", "_ebuild_path", "_issyspkg", "_tree")
2685
2686         def _start(self):
2687
2688                 logger = self.logger
2689                 opts = self.opts
2690                 pkg = self.pkg
2691                 settings = self.settings
2692                 world_atom = self.world_atom
2693                 root_config = pkg.root_config
2694                 tree = "porttree"
2695                 self._tree = tree
2696                 portdb = root_config.trees[tree].dbapi
2697                 settings.setcpv(pkg)
2698                 settings.configdict["pkg"]["EMERGE_FROM"] = pkg.type_name
2699                 ebuild_path = portdb.findname(self.pkg.cpv)
2700                 self._ebuild_path = ebuild_path
2701
2702                 prefetcher = self.prefetcher
2703                 if prefetcher is None:
2704                         pass
2705                 elif not prefetcher.isAlive():
2706                         prefetcher.cancel()
2707                 elif prefetcher.poll() is None:
2708
2709                         waiting_msg = "Fetching files " + \
2710                                 "in the background. " + \
2711                                 "To view fetch progress, run `tail -f " + \
2712                                 "/var/log/emerge-fetch.log` in another " + \
2713                                 "terminal."
2714                         msg_prefix = colorize("GOOD", " * ")
2715                         from textwrap import wrap
2716                         waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
2717                                 for line in wrap(waiting_msg, 65))
2718                         if not self.background:
2719                                 writemsg(waiting_msg, noiselevel=-1)
2720
2721                         self._current_task = prefetcher
2722                         prefetcher.addExitListener(self._prefetch_exit)
2723                         return
2724
2725                 self._prefetch_exit(prefetcher)
2726
2727         def _prefetch_exit(self, prefetcher):
2728
2729                 opts = self.opts
2730                 pkg = self.pkg
2731                 settings = self.settings
2732
2733                 if opts.fetchonly:
2734                                 fetcher = EbuildFetchonly(
2735                                         fetch_all=opts.fetch_all_uri,
2736                                         pkg=pkg, pretend=opts.pretend,
2737                                         settings=settings)
2738                                 retval = fetcher.execute()
2739                                 self.returncode = retval
2740                                 self.wait()
2741                                 return
2742
2743                 fetcher = EbuildFetcher(config_pool=self.config_pool,
2744                         fetchall=opts.fetch_all_uri,
2745                         fetchonly=opts.fetchonly,
2746                         background=self.background,
2747                         pkg=pkg, scheduler=self.scheduler)
2748
2749                 self._start_task(fetcher, self._fetch_exit)
2750
2751         def _fetch_exit(self, fetcher):
2752                 opts = self.opts
2753                 pkg = self.pkg
2754
2755                 fetch_failed = False
2756                 if opts.fetchonly:
2757                         fetch_failed = self._final_exit(fetcher) != os.EX_OK
2758                 else:
2759                         fetch_failed = self._default_exit(fetcher) != os.EX_OK
2760
2761                 if fetch_failed and fetcher.logfile is not None and \
2762                         os.path.exists(fetcher.logfile):
2763                         self.settings["PORTAGE_LOG_FILE"] = fetcher.logfile
2764
2765                 if not fetch_failed and fetcher.logfile is not None:
2766                         # Fetch was successful, so remove the fetch log.
2767                         try:
2768                                 os.unlink(fetcher.logfile)
2769                         except OSError:
2770                                 pass
2771
2772                 if fetch_failed or opts.fetchonly:
2773                         self.wait()
2774                         return
2775
2776                 logger = self.logger
2777                 opts = self.opts
2778                 pkg_count = self.pkg_count
2779                 scheduler = self.scheduler
2780                 settings = self.settings
2781                 features = settings.features
2782                 ebuild_path = self._ebuild_path
2783                 system_set = pkg.root_config.sets["system"]
2784
2785                 self._build_dir = EbuildBuildDir(pkg=pkg, settings=settings)
2786                 self._build_dir.lock()
2787
2788                 # Cleaning is triggered before the setup
2789                 # phase, in portage.doebuild().
2790                 msg = " === (%s of %s) Cleaning (%s::%s)" % \
2791                         (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2792                 short_msg = "emerge: (%s of %s) %s Clean" % \
2793                         (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2794                 logger.log(msg, short_msg=short_msg)
2795
2796                 #buildsyspkg: Check if we need to _force_ binary package creation
2797                 self._issyspkg = "buildsyspkg" in features and \
2798                                 system_set.findAtomForPackage(pkg) and \
2799                                 not opts.buildpkg
2800
2801                 if opts.buildpkg or self._issyspkg:
2802
2803                         self._buildpkg = True
2804
2805                         msg = " === (%s of %s) Compiling/Packaging (%s::%s)" % \
2806                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2807                         short_msg = "emerge: (%s of %s) %s Compile" % \
2808                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2809                         logger.log(msg, short_msg=short_msg)
2810
2811                 else:
2812                         msg = " === (%s of %s) Compiling/Merging (%s::%s)" % \
2813                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2814                         short_msg = "emerge: (%s of %s) %s Compile" % \
2815                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2816                         logger.log(msg, short_msg=short_msg)
2817
2818                 build = EbuildExecuter(background=self.background, pkg=pkg,
2819                         scheduler=scheduler, settings=settings)
2820                 self._start_task(build, self._build_exit)
2821
2822         def _unlock_builddir(self):
2823                 portage.elog.elog_process(self.pkg.cpv, self.settings)
2824                 self._build_dir.unlock()
2825
2826         def _build_exit(self, build):
2827                 if self._default_exit(build) != os.EX_OK:
2828                         self._unlock_builddir()
2829                         self.wait()
2830                         return
2831
2832                 opts = self.opts
2833                 buildpkg = self._buildpkg
2834
2835                 if not buildpkg:
2836                         self._final_exit(build)
2837                         self.wait()
2838                         return
2839
2840                 if self._issyspkg:
2841                         msg = ">>> This is a system package, " + \
2842                                 "let's pack a rescue tarball.\n"
2843
2844                         log_path = self.settings.get("PORTAGE_LOG_FILE")
2845                         if log_path is not None:
2846                                 log_file = open(log_path, 'a')
2847                                 try:
2848                                         log_file.write(msg)
2849                                 finally:
2850                                         log_file.close()
2851
2852                         if not self.background:
2853                                 portage.writemsg_stdout(msg, noiselevel=-1)
2854
2855                 packager = EbuildBinpkg(background=self.background, pkg=self.pkg,
2856                         scheduler=self.scheduler, settings=self.settings)
2857
2858                 self._start_task(packager, self._buildpkg_exit)
2859
2860         def _buildpkg_exit(self, packager):
2861                 """
2862                 Released build dir lock when there is a failure or
2863                 when in buildpkgonly mode. Otherwise, the lock will
2864                 be released when merge() is called.
2865                 """
2866
2867                 if self._default_exit(packager) != os.EX_OK:
2868                         self._unlock_builddir()
2869                         self.wait()
2870                         return
2871
2872                 if self.opts.buildpkgonly:
2873                         # Need to call "clean" phase for buildpkgonly mode
2874                         portage.elog.elog_process(self.pkg.cpv, self.settings)
2875                         phase = "clean"
2876                         clean_phase = EbuildPhase(background=self.background,
2877                                 pkg=self.pkg, phase=phase,
2878                                 scheduler=self.scheduler, settings=self.settings,
2879                                 tree=self._tree)
2880                         self._start_task(clean_phase, self._clean_exit)
2881                         return
2882
2883                 # Continue holding the builddir lock until
2884                 # after the package has been installed.
2885                 self._current_task = None
2886                 self.returncode = packager.returncode
2887                 self.wait()
2888
2889         def _clean_exit(self, clean_phase):
2890                 if self._final_exit(clean_phase) != os.EX_OK or \
2891                         self.opts.buildpkgonly:
2892                         self._unlock_builddir()
2893                 self.wait()
2894
2895         def install(self):
2896                 """
2897                 Install the package and then clean up and release locks.
2898                 Only call this after the build has completed successfully
2899                 and neither fetchonly nor buildpkgonly mode are enabled.
2900                 """
2901
2902                 find_blockers = self.find_blockers
2903                 ldpath_mtimes = self.ldpath_mtimes
2904                 logger = self.logger
2905                 pkg = self.pkg
2906                 pkg_count = self.pkg_count
2907                 settings = self.settings
2908                 world_atom = self.world_atom
2909                 ebuild_path = self._ebuild_path
2910                 tree = self._tree
2911
2912                 merge = EbuildMerge(find_blockers=self.find_blockers,
2913                         ldpath_mtimes=ldpath_mtimes, logger=logger, pkg=pkg,
2914                         pkg_count=pkg_count, pkg_path=ebuild_path,
2915                         scheduler=self.scheduler,
2916                         settings=settings, tree=tree, world_atom=world_atom)
2917
2918                 msg = " === (%s of %s) Merging (%s::%s)" % \
2919                         (pkg_count.curval, pkg_count.maxval,
2920                         pkg.cpv, ebuild_path)
2921                 short_msg = "emerge: (%s of %s) %s Merge" % \
2922                         (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2923                 logger.log(msg, short_msg=short_msg)
2924
2925                 try:
2926                         rval = merge.execute()
2927                 finally:
2928                         self._unlock_builddir()
2929
2930                 return rval
2931
2932 class EbuildExecuter(CompositeTask):
2933
2934         __slots__ = ("pkg", "scheduler", "settings") + ("_tree",)
2935
2936         _phases = ("prepare", "configure", "compile", "test", "install")
2937
2938         _live_eclasses = frozenset([
2939                 "bzr",
2940                 "cvs",
2941                 "darcs",
2942                 "git",
2943                 "mercurial",
2944                 "subversion"
2945         ])
2946
2947         def _start(self):
2948                 self._tree = "porttree"
2949                 pkg = self.pkg
2950                 phase = "clean"
2951                 clean_phase = EbuildPhase(background=self.background, pkg=pkg, phase=phase,
2952                         scheduler=self.scheduler, settings=self.settings, tree=self._tree)
2953                 self._start_task(clean_phase, self._clean_phase_exit)
2954
2955         def _clean_phase_exit(self, clean_phase):
2956
2957                 if self._default_exit(clean_phase) != os.EX_OK:
2958                         self.wait()
2959                         return
2960
2961                 pkg = self.pkg
2962                 scheduler = self.scheduler
2963                 settings = self.settings
2964                 cleanup = 1
2965
2966                 # This initializes PORTAGE_LOG_FILE.
2967                 portage.prepare_build_dirs(pkg.root, settings, cleanup)
2968
2969                 setup_phase = EbuildPhase(background=self.background,
2970                         pkg=pkg, phase="setup", scheduler=scheduler,
2971                         settings=settings, tree=self._tree)
2972
2973                 setup_phase.addExitListener(self._setup_exit)
2974                 self._current_task = setup_phase
2975                 self.scheduler.scheduleSetup(setup_phase)
2976
2977         def _setup_exit(self, setup_phase):
2978
2979                 if self._default_exit(setup_phase) != os.EX_OK:
2980                         self.wait()
2981                         return
2982
2983                 unpack_phase = EbuildPhase(background=self.background,
2984                         pkg=self.pkg, phase="unpack", scheduler=self.scheduler,
2985                         settings=self.settings, tree=self._tree)
2986
2987                 if self._live_eclasses.intersection(self.pkg.inherited):
2988                         # Serialize $DISTDIR access for live ebuilds since
2989                         # otherwise they can interfere with eachother.
2990
2991                         unpack_phase.addExitListener(self._unpack_exit)
2992                         self._current_task = unpack_phase
2993                         self.scheduler.scheduleUnpack(unpack_phase)
2994
2995                 else:
2996                         self._start_task(unpack_phase, self._unpack_exit)
2997
2998         def _unpack_exit(self, unpack_phase):
2999
3000                 if self._default_exit(unpack_phase) != os.EX_OK:
3001                         self.wait()
3002                         return
3003
3004                 ebuild_phases = TaskSequence(scheduler=self.scheduler)
3005
3006                 pkg = self.pkg
3007                 phases = self._phases
3008                 eapi = pkg.metadata["EAPI"]
3009                 if eapi in ("0", "1"):
3010                         # skip src_prepare and src_configure
3011                         phases = phases[2:]
3012
3013                 for phase in phases:
3014                         ebuild_phases.add(EbuildPhase(background=self.background,
3015                                 pkg=self.pkg, phase=phase, scheduler=self.scheduler,
3016                                 settings=self.settings, tree=self._tree))
3017
3018                 self._start_task(ebuild_phases, self._default_final_exit)
3019
3020 class EbuildMetadataPhase(SubProcess):
3021
3022         """
3023         Asynchronous interface for the ebuild "depend" phase which is
3024         used to extract metadata from the ebuild.
3025         """
3026
3027         __slots__ = ("cpv", "ebuild_path", "fd_pipes", "metadata_callback",
3028                 "ebuild_mtime", "portdb", "repo_path", "settings") + \
3029                 ("_raw_metadata",)
3030
3031         _file_names = ("ebuild",)
3032         _files_dict = slot_dict_class(_file_names, prefix="")
3033         _metadata_fd = 9
3034
3035         def _start(self):
3036                 settings = self.settings
3037                 settings.setcpv(self.cpv)
3038                 ebuild_path = self.ebuild_path
3039
3040                 eapi = None
3041                 if 'parse-eapi-glep-55' in settings.features:
3042                         pf, eapi = portage._split_ebuild_name_glep55(
3043                                 os.path.basename(ebuild_path))
3044                 if eapi is None and \
3045                         'parse-eapi-ebuild-head' in settings.features:
3046                         eapi = portage._parse_eapi_ebuild_head(codecs.open(ebuild_path,
3047                                 mode='r', encoding='utf_8', errors='replace'))
3048
3049                 if eapi is not None:
3050                         if not portage.eapi_is_supported(eapi):
3051                                 self.metadata_callback(self.cpv, self.ebuild_path,
3052                                         self.repo_path, {'EAPI' : eapi}, self.ebuild_mtime)
3053                                 self.returncode = os.EX_OK
3054                                 self.wait()
3055                                 return
3056
3057                         settings.configdict['pkg']['EAPI'] = eapi
3058
3059                 debug = settings.get("PORTAGE_DEBUG") == "1"
3060                 master_fd = None
3061                 slave_fd = None
3062                 fd_pipes = None
3063                 if self.fd_pipes is not None:
3064                         fd_pipes = self.fd_pipes.copy()
3065                 else:
3066                         fd_pipes = {}
3067
3068                 fd_pipes.setdefault(0, sys.stdin.fileno())
3069                 fd_pipes.setdefault(1, sys.stdout.fileno())
3070                 fd_pipes.setdefault(2, sys.stderr.fileno())
3071
3072                 # flush any pending output
3073                 for fd in fd_pipes.itervalues():
3074                         if fd == sys.stdout.fileno():
3075                                 sys.stdout.flush()
3076                         if fd == sys.stderr.fileno():
3077                                 sys.stderr.flush()
3078
3079                 fd_pipes_orig = fd_pipes.copy()
3080                 self._files = self._files_dict()
3081                 files = self._files
3082
3083                 master_fd, slave_fd = os.pipe()
3084                 fcntl.fcntl(master_fd, fcntl.F_SETFL,
3085                         fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
3086
3087                 fd_pipes[self._metadata_fd] = slave_fd
3088
3089                 self._raw_metadata = []
3090                 files.ebuild = os.fdopen(master_fd, 'r')
3091                 self._reg_id = self.scheduler.register(files.ebuild.fileno(),
3092                         self._registered_events, self._output_handler)
3093                 self._registered = True
3094
3095                 retval = portage.doebuild(ebuild_path, "depend",
3096                         settings["ROOT"], settings, debug,
3097                         mydbapi=self.portdb, tree="porttree",
3098                         fd_pipes=fd_pipes, returnpid=True)
3099
3100                 os.close(slave_fd)
3101
3102                 if isinstance(retval, int):
3103                         # doebuild failed before spawning
3104                         self._unregister()
3105                         self.returncode = retval
3106                         self.wait()
3107                         return
3108
3109                 self.pid = retval[0]
3110                 portage.process.spawned_pids.remove(self.pid)
3111
3112         def _output_handler(self, fd, event):
3113
3114                 if event & PollConstants.POLLIN:
3115                         self._raw_metadata.append(self._files.ebuild.read())
3116                         if not self._raw_metadata[-1]:
3117                                 self._unregister()
3118                                 self.wait()
3119
3120                 self._unregister_if_appropriate(event)
3121                 return self._registered
3122
3123         def _set_returncode(self, wait_retval):
3124                 SubProcess._set_returncode(self, wait_retval)
3125                 if self.returncode == os.EX_OK:
3126                         metadata_lines = "".join(self._raw_metadata).splitlines()
3127                         if len(portage.auxdbkeys) != len(metadata_lines):
3128                                 # Don't trust bash's returncode if the
3129                                 # number of lines is incorrect.
3130                                 self.returncode = 1
3131                         else:
3132                                 metadata = izip(portage.auxdbkeys, metadata_lines)
3133                                 self.metadata_callback(self.cpv, self.ebuild_path,
3134                                         self.repo_path, metadata, self.ebuild_mtime)
3135
3136 class EbuildProcess(SpawnProcess):
3137
3138         __slots__ = ("phase", "pkg", "settings", "tree")
3139
3140         def _start(self):
3141                 # Don't open the log file during the clean phase since the
3142                 # open file can result in an nfs lock on $T/build.log which
3143                 # prevents the clean phase from removing $T.
3144                 if self.phase not in ("clean", "cleanrm"):
3145                         self.logfile = self.settings.get("PORTAGE_LOG_FILE")
3146                 SpawnProcess._start(self)
3147
3148         def _pipe(self, fd_pipes):
3149                 stdout_pipe = fd_pipes.get(1)
3150                 got_pty, master_fd, slave_fd = \
3151                         portage._create_pty_or_pipe(copy_term_size=stdout_pipe)
3152                 return (master_fd, slave_fd)
3153
3154         def _spawn(self, args, **kwargs):
3155
3156                 root_config = self.pkg.root_config
3157                 tree = self.tree
3158                 mydbapi = root_config.trees[tree].dbapi
3159                 settings = self.settings
3160                 ebuild_path = settings["EBUILD"]
3161                 debug = settings.get("PORTAGE_DEBUG") == "1"
3162
3163                 rval = portage.doebuild(ebuild_path, self.phase,
3164                         root_config.root, settings, debug,
3165                         mydbapi=mydbapi, tree=tree, **kwargs)
3166
3167                 return rval
3168
3169         def _set_returncode(self, wait_retval):
3170                 SpawnProcess._set_returncode(self, wait_retval)
3171
3172                 if self.phase not in ("clean", "cleanrm"):
3173                         self.returncode = portage._doebuild_exit_status_check_and_log(
3174                                 self.settings, self.phase, self.returncode)
3175
3176                 if self.phase == "test" and self.returncode != os.EX_OK and \
3177                         "test-fail-continue" in self.settings.features:
3178                         self.returncode = os.EX_OK
3179
3180                 portage._post_phase_userpriv_perms(self.settings)
3181
3182 class EbuildPhase(CompositeTask):
3183
3184         __slots__ = ("background", "pkg", "phase",
3185                 "scheduler", "settings", "tree")
3186
3187         _post_phase_cmds = portage._post_phase_cmds
3188
3189         def _start(self):
3190
3191                 ebuild_process = EbuildProcess(background=self.background,
3192                         pkg=self.pkg, phase=self.phase, scheduler=self.scheduler,
3193                         settings=self.settings, tree=self.tree)
3194
3195                 self._start_task(ebuild_process, self._ebuild_exit)
3196
3197         def _ebuild_exit(self, ebuild_process):
3198
3199                 if self.phase == "install":
3200                         out = None
3201                         log_path = self.settings.get("PORTAGE_LOG_FILE")
3202                         log_file = None
3203                         if self.background and log_path is not None:
3204                                 log_file = open(log_path, 'a')
3205                                 out = log_file
3206                         try:
3207                                 portage._check_build_log(self.settings, out=out)
3208                         finally:
3209                                 if log_file is not None:
3210                                         log_file.close()
3211
3212                 if self._default_exit(ebuild_process) != os.EX_OK:
3213                         self.wait()
3214                         return
3215
3216                 settings = self.settings
3217
3218                 if self.phase == "install":
3219                         portage._post_src_install_chost_fix(settings)
3220                         portage._post_src_install_uid_fix(settings)
3221
3222                 post_phase_cmds = self._post_phase_cmds.get(self.phase)
3223                 if post_phase_cmds is not None:
3224                         post_phase = MiscFunctionsProcess(background=self.background,
3225                                 commands=post_phase_cmds, phase=self.phase, pkg=self.pkg,
3226                                 scheduler=self.scheduler, settings=settings)
3227                         self._start_task(post_phase, self._post_phase_exit)
3228                         return
3229
3230                 self.returncode = ebuild_process.returncode
3231                 self._current_task = None
3232                 self.wait()
3233
3234         def _post_phase_exit(self, post_phase):
3235                 if self._final_exit(post_phase) != os.EX_OK:
3236                         writemsg("!!! post %s failed; exiting.\n" % self.phase,
3237                                 noiselevel=-1)
3238                 self._current_task = None
3239                 self.wait()
3240                 return
3241
3242 class EbuildBinpkg(EbuildProcess):
3243         """
3244         This assumes that src_install() has successfully completed.
3245         """
3246         __slots__ = ("_binpkg_tmpfile",)
3247
3248         def _start(self):
3249                 self.phase = "package"
3250                 self.tree = "porttree"
3251                 pkg = self.pkg
3252                 root_config = pkg.root_config
3253                 portdb = root_config.trees["porttree"].dbapi
3254                 bintree = root_config.trees["bintree"]
3255                 ebuild_path = portdb.findname(self.pkg.cpv)
3256                 settings = self.settings
3257                 debug = settings.get("PORTAGE_DEBUG") == "1"
3258
3259                 bintree.prevent_collision(pkg.cpv)
3260                 binpkg_tmpfile = os.path.join(bintree.pkgdir,
3261                         pkg.cpv + ".tbz2." + str(os.getpid()))
3262                 self._binpkg_tmpfile = binpkg_tmpfile
3263                 settings["PORTAGE_BINPKG_TMPFILE"] = binpkg_tmpfile
3264                 settings.backup_changes("PORTAGE_BINPKG_TMPFILE")
3265
3266                 try:
3267                         EbuildProcess._start(self)
3268                 finally:
3269                         settings.pop("PORTAGE_BINPKG_TMPFILE", None)
3270
3271         def _set_returncode(self, wait_retval):
3272                 EbuildProcess._set_returncode(self, wait_retval)
3273
3274                 pkg = self.pkg
3275                 bintree = pkg.root_config.trees["bintree"]
3276                 binpkg_tmpfile = self._binpkg_tmpfile
3277                 if self.returncode == os.EX_OK:
3278                         bintree.inject(pkg.cpv, filename=binpkg_tmpfile)
3279
3280 class EbuildMerge(SlotObject):
3281
3282         __slots__ = ("find_blockers", "logger", "ldpath_mtimes",
3283                 "pkg", "pkg_count", "pkg_path", "pretend",
3284                 "scheduler", "settings", "tree", "world_atom")
3285
3286         def execute(self):
3287                 root_config = self.pkg.root_config
3288                 settings = self.settings
3289                 retval = portage.merge(settings["CATEGORY"],
3290                         settings["PF"], settings["D"],
3291                         os.path.join(settings["PORTAGE_BUILDDIR"],
3292                         "build-info"), root_config.root, settings,
3293                         myebuild=settings["EBUILD"],
3294                         mytree=self.tree, mydbapi=root_config.trees[self.tree].dbapi,
3295                         vartree=root_config.trees["vartree"],
3296                         prev_mtimes=self.ldpath_mtimes,
3297                         scheduler=self.scheduler,
3298                         blockers=self.find_blockers)
3299
3300                 if retval == os.EX_OK:
3301                         self.world_atom(self.pkg)
3302                         self._log_success()
3303
3304                 return retval
3305
3306         def _log_success(self):
3307                 pkg = self.pkg
3308                 pkg_count = self.pkg_count
3309                 pkg_path = self.pkg_path
3310                 logger = self.logger
3311                 if "noclean" not in self.settings.features:
3312                         short_msg = "emerge: (%s of %s) %s Clean Post" % \
3313                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3314                         logger.log((" === (%s of %s) " + \
3315                                 "Post-Build Cleaning (%s::%s)") % \
3316                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path),
3317                                 short_msg=short_msg)
3318                 logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
3319                         (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
3320
3321 class PackageUninstall(AsynchronousTask):
3322
3323         __slots__ = ("ldpath_mtimes", "opts", "pkg", "scheduler", "settings")
3324
3325         def _start(self):
3326                 try:
3327                         unmerge(self.pkg.root_config, self.opts, "unmerge",
3328                                 [self.pkg.cpv], self.ldpath_mtimes, clean_world=0,
3329                                 clean_delay=0, raise_on_error=1, scheduler=self.scheduler,
3330                                 writemsg_level=self._writemsg_level)
3331                 except UninstallFailure, e:
3332                         self.returncode = e.status
3333                 else:
3334                         self.returncode = os.EX_OK
3335                 self.wait()
3336
3337         def _writemsg_level(self, msg, level=0, noiselevel=0):
3338
3339                 log_path = self.settings.get("PORTAGE_LOG_FILE")
3340                 background = self.background
3341
3342                 if log_path is None:
3343                         if not (background and level < logging.WARNING):
3344                                 portage.util.writemsg_level(msg,
3345                                         level=level, noiselevel=noiselevel)
3346                 else:
3347                         if not background:
3348                                 portage.util.writemsg_level(msg,
3349                                         level=level, noiselevel=noiselevel)
3350
3351                         f = open(log_path, 'a')
3352                         try:
3353                                 f.write(msg)
3354                         finally:
3355                                 f.close()
3356
3357 class Binpkg(CompositeTask):
3358
3359         __slots__ = ("find_blockers",
3360                 "ldpath_mtimes", "logger", "opts",
3361                 "pkg", "pkg_count", "prefetcher", "settings", "world_atom") + \
3362                 ("_bintree", "_build_dir", "_ebuild_path", "_fetched_pkg",
3363                 "_image_dir", "_infloc", "_pkg_path", "_tree", "_verify")
3364
3365         def _writemsg_level(self, msg, level=0, noiselevel=0):
3366
3367                 if not self.background:
3368                         portage.util.writemsg_level(msg,
3369                                 level=level, noiselevel=noiselevel)
3370
3371                 log_path = self.settings.get("PORTAGE_LOG_FILE")
3372                 if  log_path is not None:
3373                         f = open(log_path, 'a')
3374                         try:
3375                                 f.write(msg)
3376                         finally:
3377                                 f.close()
3378
3379         def _start(self):
3380
3381                 pkg = self.pkg
3382                 settings = self.settings
3383                 settings.setcpv(pkg)
3384                 self._tree = "bintree"
3385                 self._bintree = self.pkg.root_config.trees[self._tree]
3386                 self._verify = not self.opts.pretend
3387
3388                 dir_path = os.path.join(settings["PORTAGE_TMPDIR"],
3389                         "portage", pkg.category, pkg.pf)
3390                 self._build_dir = EbuildBuildDir(dir_path=dir_path,
3391                         pkg=pkg, settings=settings)
3392                 self._image_dir = os.path.join(dir_path, "image")
3393                 self._infloc = os.path.join(dir_path, "build-info")
3394                 self._ebuild_path = os.path.join(self._infloc, pkg.pf + ".ebuild")
3395                 settings["EBUILD"] = self._ebuild_path
3396                 debug = settings.get("PORTAGE_DEBUG") == "1"
3397                 portage.doebuild_environment(self._ebuild_path, "setup",
3398                         settings["ROOT"], settings, debug, 1, self._bintree.dbapi)
3399                 settings.configdict["pkg"]["EMERGE_FROM"] = pkg.type_name
3400
3401                 # The prefetcher has already completed or it
3402                 # could be running now. If it's running now,
3403                 # wait for it to complete since it holds
3404                 # a lock on the file being fetched. The
3405                 # portage.locks functions are only designed
3406                 # to work between separate processes. Since
3407                 # the lock is held by the current process,
3408                 # use the scheduler and fetcher methods to
3409                 # synchronize with the fetcher.
3410                 prefetcher = self.prefetcher
3411                 if prefetcher is None:
3412                         pass
3413                 elif not prefetcher.isAlive():
3414                         prefetcher.cancel()
3415                 elif prefetcher.poll() is None:
3416
3417                         waiting_msg = ("Fetching '%s' " + \
3418                                 "in the background. " + \
3419                                 "To view fetch progress, run `tail -f " + \
3420                                 "/var/log/emerge-fetch.log` in another " + \
3421                                 "terminal.") % prefetcher.pkg_path
3422                         msg_prefix = colorize("GOOD", " * ")
3423                         from textwrap import wrap
3424                         waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
3425                                 for line in wrap(waiting_msg, 65))
3426                         if not self.background:
3427                                 writemsg(waiting_msg, noiselevel=-1)
3428
3429                         self._current_task = prefetcher
3430                         prefetcher.addExitListener(self._prefetch_exit)
3431                         return
3432
3433                 self._prefetch_exit(prefetcher)
3434
3435         def _prefetch_exit(self, prefetcher):
3436
3437                 pkg = self.pkg
3438                 pkg_count = self.pkg_count
3439                 if not (self.opts.pretend or self.opts.fetchonly):
3440                         self._build_dir.lock()
3441                         # If necessary, discard old log so that we don't
3442                         # append to it.
3443                         self._build_dir.clean_log()
3444                         # Initialze PORTAGE_LOG_FILE.
3445                         portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
3446                 fetcher = BinpkgFetcher(background=self.background,
3447                         logfile=self.settings.get("PORTAGE_LOG_FILE"), pkg=self.pkg,
3448                         pretend=self.opts.pretend, scheduler=self.scheduler)
3449                 pkg_path = fetcher.pkg_path
3450                 self._pkg_path = pkg_path
3451
3452                 if self.opts.getbinpkg and self._bintree.isremote(pkg.cpv):
3453
3454                         msg = " --- (%s of %s) Fetching Binary (%s::%s)" %\
3455                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
3456                         short_msg = "emerge: (%s of %s) %s Fetch" % \
3457                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3458                         self.logger.log(msg, short_msg=short_msg)
3459                         self._start_task(fetcher, self._fetcher_exit)
3460                         return
3461
3462                 self._fetcher_exit(fetcher)
3463
3464         def _fetcher_exit(self, fetcher):
3465
3466                 # The fetcher only has a returncode when
3467                 # --getbinpkg is enabled.
3468                 if fetcher.returncode is not None:
3469                         self._fetched_pkg = True
3470                         if self._default_exit(fetcher) != os.EX_OK:
3471                                 self._unlock_builddir()
3472                                 self.wait()
3473                                 return
3474
3475                 if self.opts.pretend:
3476                         self._current_task = None
3477                         self.returncode = os.EX_OK
3478                         self.wait()
3479                         return
3480
3481                 verifier = None
3482                 if self._verify:
3483                         logfile = None
3484                         if self.background:
3485                                 logfile = self.settings.get("PORTAGE_LOG_FILE")
3486                         verifier = BinpkgVerifier(background=self.background,
3487                                 logfile=logfile, pkg=self.pkg)
3488                         self._start_task(verifier, self._verifier_exit)
3489                         return
3490
3491                 self._verifier_exit(verifier)
3492
3493         def _verifier_exit(self, verifier):
3494                 if verifier is not None and \
3495                         self._default_exit(verifier) != os.EX_OK:
3496                         self._unlock_builddir()
3497                         self.wait()
3498                         return
3499
3500                 logger = self.logger
3501                 pkg = self.pkg
3502                 pkg_count = self.pkg_count
3503                 pkg_path = self._pkg_path
3504
3505                 if self._fetched_pkg:
3506                         self._bintree.inject(pkg.cpv, filename=pkg_path)
3507
3508                 if self.opts.fetchonly:
3509                         self._current_task = None
3510                         self.returncode = os.EX_OK
3511                         self.wait()
3512                         return
3513
3514                 msg = " === (%s of %s) Merging Binary (%s::%s)" % \
3515                         (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
3516                 short_msg = "emerge: (%s of %s) %s Merge Binary" % \
3517                         (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3518                 logger.log(msg, short_msg=short_msg)
3519
3520                 phase = "clean"
3521                 settings = self.settings
3522                 ebuild_phase = EbuildPhase(background=self.background,
3523                         pkg=pkg, phase=phase, scheduler=self.scheduler,
3524                         settings=settings, tree=self._tree)
3525
3526                 self._start_task(ebuild_phase, self._clean_exit)
3527
3528         def _clean_exit(self, clean_phase):
3529                 if self._default_exit(clean_phase) != os.EX_OK:
3530                         self._unlock_builddir()
3531                         self.wait()
3532                         return
3533
3534                 dir_path = self._build_dir.dir_path
3535
3536                 infloc = self._infloc
3537                 pkg = self.pkg
3538                 pkg_path = self._pkg_path
3539
3540                 dir_mode = 0755
3541                 for mydir in (dir_path, self._image_dir, infloc):
3542                         portage.util.ensure_dirs(mydir, uid=portage.data.portage_uid,
3543                                 gid=portage.data.portage_gid, mode=dir_mode)
3544
3545                 # This initializes PORTAGE_LOG_FILE.
3546                 portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
3547                 self._writemsg_level(">>> Extracting info\n")
3548
3549                 pkg_xpak = portage.xpak.tbz2(self._pkg_path)
3550                 check_missing_metadata = ("CATEGORY", "PF")
3551                 missing_metadata = set()
3552                 for k in check_missing_metadata:
3553                         v = pkg_xpak.getfile(k)
3554                         if not v:
3555                                 missing_metadata.add(k)
3556
3557                 pkg_xpak.unpackinfo(infloc)
3558                 for k in missing_metadata:
3559                         if k == "CATEGORY":
3560                                 v = pkg.category
3561                         elif k == "PF":
3562                                 v = pkg.pf
3563                         else:
3564                                 continue
3565
3566                         f = open(os.path.join(infloc, k), 'wb')
3567                         try:
3568                                 f.write(v + "\n")
3569                         finally:
3570                                 f.close()
3571
3572                 # Store the md5sum in the vdb.
3573                 f = open(os.path.join(infloc, "BINPKGMD5"), "w")
3574                 try:
3575                         f.write(str(portage.checksum.perform_md5(pkg_path)) + "\n")
3576                 finally:
3577                         f.close()
3578
3579                 # This gives bashrc users an opportunity to do various things
3580                 # such as remove binary packages after they're installed.
3581                 settings = self.settings
3582                 settings.setcpv(self.pkg)
3583                 settings["PORTAGE_BINPKG_FILE"] = pkg_path
3584                 settings.backup_changes("PORTAGE_BINPKG_FILE")
3585
3586                 phase = "setup"
3587                 setup_phase = EbuildPhase(background=self.background,
3588                         pkg=self.pkg, phase=phase, scheduler=self.scheduler,
3589                         settings=settings, tree=self._tree)
3590
3591                 setup_phase.addExitListener(self._setup_exit)
3592                 self._current_task = setup_phase
3593                 self.scheduler.scheduleSetup(setup_phase)
3594
3595         def _setup_exit(self, setup_phase):
3596                 if self._default_exit(setup_phase) != os.EX_OK:
3597                         self._unlock_builddir()
3598                         self.wait()
3599                         return
3600
3601                 extractor = BinpkgExtractorAsync(background=self.background,
3602                         image_dir=self._image_dir,
3603                         pkg=self.pkg, pkg_path=self._pkg_path, scheduler=self.scheduler)
3604                 self._writemsg_level(">>> Extracting %s\n" % self.pkg.cpv)
3605                 self._start_task(extractor, self._extractor_exit)
3606
3607         def _extractor_exit(self, extractor):
3608                 if self._final_exit(extractor) != os.EX_OK:
3609                         self._unlock_builddir()
3610                         writemsg("!!! Error Extracting '%s'\n" % self._pkg_path,
3611                                 noiselevel=-1)
3612                 self.wait()
3613
3614         def _unlock_builddir(self):
3615                 if self.opts.pretend or self.opts.fetchonly:
3616                         return
3617                 portage.elog.elog_process(self.pkg.cpv, self.settings)
3618                 self._build_dir.unlock()
3619
3620         def install(self):
3621
3622                 # This gives bashrc users an opportunity to do various things
3623                 # such as remove binary packages after they're installed.
3624                 settings = self.settings
3625                 settings["PORTAGE_BINPKG_FILE"] = self._pkg_path
3626                 settings.backup_changes("PORTAGE_BINPKG_FILE")
3627
3628                 merge = EbuildMerge(find_blockers=self.find_blockers,
3629                         ldpath_mtimes=self.ldpath_mtimes, logger=self.logger,
3630                         pkg=self.pkg, pkg_count=self.pkg_count,
3631                         pkg_path=self._pkg_path, scheduler=self.scheduler,
3632                         settings=settings, tree=self._tree, world_atom=self.world_atom)
3633
3634                 try:
3635                         retval = merge.execute()
3636                 finally:
3637                         settings.pop("PORTAGE_BINPKG_FILE", None)
3638                         self._unlock_builddir()
3639                 return retval
3640
3641 class BinpkgFetcher(SpawnProcess):
3642
3643         __slots__ = ("pkg", "pretend",
3644                 "locked", "pkg_path", "_lock_obj")
3645
3646         def __init__(self, **kwargs):
3647                 SpawnProcess.__init__(self, **kwargs)
3648                 pkg = self.pkg
3649                 self.pkg_path = pkg.root_config.trees["bintree"].getname(pkg.cpv)
3650
3651         def _start(self):
3652
3653                 if self.cancelled:
3654                         return
3655
3656                 pkg = self.pkg
3657                 pretend = self.pretend
3658                 bintree = pkg.root_config.trees["bintree"]
3659                 settings = bintree.settings
3660                 use_locks = "distlocks" in settings.features
3661                 pkg_path = self.pkg_path
3662
3663                 if not pretend:
3664                         portage.util.ensure_dirs(os.path.dirname(pkg_path))
3665                         if use_locks:
3666                                 self.lock()
3667                 exists = os.path.exists(pkg_path)
3668                 resume = exists and os.path.basename(pkg_path) in bintree.invalids
3669                 if not (pretend or resume):
3670                         # Remove existing file or broken symlink.
3671                         try:
3672                                 os.unlink(pkg_path)
3673                         except OSError:
3674                                 pass
3675
3676                 # urljoin doesn't work correctly with
3677                 # unrecognized protocols like sftp
3678                 if bintree._remote_has_index:
3679                         rel_uri = bintree._remotepkgs[pkg.cpv].get("PATH")
3680                         if not rel_uri:
3681                                 rel_uri = pkg.cpv + ".tbz2"
3682                         uri = bintree._remote_base_uri.rstrip("/") + \
3683                                 "/" + rel_uri.lstrip("/")
3684                 else:
3685                         uri = settings["PORTAGE_BINHOST"].rstrip("/") + \
3686                                 "/" + pkg.pf + ".tbz2"
3687
3688                 if pretend:
3689                         portage.writemsg_stdout("\n%s\n" % uri, noiselevel=-1)
3690                         self.returncode = os.EX_OK
3691                         self.wait()
3692                         return
3693
3694                 protocol = urlparse.urlparse(uri)[0]
3695                 fcmd_prefix = "FETCHCOMMAND"
3696                 if resume:
3697                         fcmd_prefix = "RESUMECOMMAND"
3698                 fcmd = settings.get(fcmd_prefix + "_" + protocol.upper())
3699                 if not fcmd:
3700                         fcmd = settings.get(fcmd_prefix)
3701
3702                 fcmd_vars = {
3703                         "DISTDIR" : os.path.dirname(pkg_path),
3704                         "URI"     : uri,
3705                         "FILE"    : os.path.basename(pkg_path)
3706                 }
3707
3708                 fetch_env = dict(settings.iteritems())
3709                 fetch_args = [portage.util.varexpand(x, mydict=fcmd_vars) \
3710                         for x in shlex.split(fcmd)]
3711
3712                 if self.fd_pipes is None:
3713                         self.fd_pipes = {}
3714                 fd_pipes = self.fd_pipes
3715
3716                 # Redirect all output to stdout since some fetchers like
3717                 # wget pollute stderr (if portage detects a problem then it
3718                 # can send it's own message to stderr).
3719                 fd_pipes.setdefault(0, sys.stdin.fileno())
3720                 fd_pipes.setdefault(1, sys.stdout.fileno())
3721                 fd_pipes.setdefault(2, sys.stdout.fileno())
3722
3723                 self.args = fetch_args
3724                 self.env = fetch_env
3725                 SpawnProcess._start(self)
3726
3727         def _set_returncode(self, wait_retval):
3728                 SpawnProcess._set_returncode(self, wait_retval)
3729                 if self.returncode == os.EX_OK:
3730                         # If possible, update the mtime to match the remote package if
3731                         # the fetcher didn't already do it automatically.
3732                         bintree = self.pkg.root_config.trees["bintree"]
3733                         if bintree._remote_has_index:
3734                                 remote_mtime = bintree._remotepkgs[self.pkg.cpv].get("MTIME")
3735                                 if remote_mtime is not None:
3736                                         try:
3737                                                 remote_mtime = long(remote_mtime)
3738                                         except ValueError:
3739                                                 pass
3740                                         else:
3741                                                 try:
3742                                                         local_mtime = long(os.stat(self.pkg_path).st_mtime)
3743                                                 except OSError:
3744                                                         pass
3745                                                 else:
3746                                                         if remote_mtime != local_mtime:
3747                                                                 try:
3748                                                                         os.utime(self.pkg_path,
3749                                                                                 (remote_mtime, remote_mtime))
3750                                                                 except OSError:
3751                                                                         pass
3752
3753                 if self.locked:
3754                         self.unlock()
3755
3756         def lock(self):
3757                 """
3758                 This raises an AlreadyLocked exception if lock() is called
3759                 while a lock is already held. In order to avoid this, call
3760                 unlock() or check whether the "locked" attribute is True
3761                 or False before calling lock().
3762                 """
3763                 if self._lock_obj is not None:
3764                         raise self.AlreadyLocked((self._lock_obj,))
3765
3766                 self._lock_obj = portage.locks.lockfile(
3767                         self.pkg_path, wantnewlockfile=1)
3768                 self.locked = True
3769
3770         class AlreadyLocked(portage.exception.PortageException):
3771                 pass
3772
3773         def unlock(self):
3774                 if self._lock_obj is None:
3775                         return
3776                 portage.locks.unlockfile(self._lock_obj)
3777                 self._lock_obj = None
3778                 self.locked = False
3779
3780 class BinpkgVerifier(AsynchronousTask):
3781         __slots__ = ("logfile", "pkg",)
3782
3783         def _start(self):
3784                 """
3785                 Note: Unlike a normal AsynchronousTask.start() method,
3786                 this one does all work is synchronously. The returncode
3787                 attribute will be set before it returns.
3788                 """
3789
3790                 pkg = self.pkg
3791                 root_config = pkg.root_config
3792                 bintree = root_config.trees["bintree"]
3793                 rval = os.EX_OK
3794                 stdout_orig = sys.stdout
3795                 stderr_orig = sys.stderr
3796                 log_file = None
3797                 if self.background and self.logfile is not None:
3798                         log_file = open(self.logfile, 'a')
3799                 try:
3800                         if log_file is not None:
3801                                 sys.stdout = log_file
3802                                 sys.stderr = log_file
3803                         try:
3804                                 bintree.digestCheck(pkg)
3805                         except portage.exception.FileNotFound:
3806                                 writemsg("!!! Fetching Binary failed " + \
3807                                         "for '%s'\n" % pkg.cpv, noiselevel=-1)
3808                                 rval = 1
3809                         except portage.exception.DigestException, e:
3810                                 writemsg("\n!!! Digest verification failed:\n",
3811                                         noiselevel=-1)
3812                                 writemsg("!!! %s\n" % e.value[0],
3813                                         noiselevel=-1)
3814                                 writemsg("!!! Reason: %s\n" % e.value[1],
3815                                         noiselevel=-1)
3816                                 writemsg("!!! Got: %s\n" % e.value[2],
3817                                         noiselevel=-1)
3818                                 writemsg("!!! Expected: %s\n" % e.value[3],
3819                                         noiselevel=-1)
3820                                 rval = 1
3821                         if rval != os.EX_OK:
3822                                 pkg_path = bintree.getname(pkg.cpv)
3823                                 head, tail = os.path.split(pkg_path)
3824                                 temp_filename = portage._checksum_failure_temp_file(head, tail)
3825                                 writemsg("File renamed to '%s'\n" % (temp_filename,),
3826                                         noiselevel=-1)
3827                 finally:
3828                         sys.stdout = stdout_orig
3829                         sys.stderr = stderr_orig
3830                         if log_file is not None:
3831                                 log_file.close()
3832
3833                 self.returncode = rval
3834                 self.wait()
3835
3836 class BinpkgPrefetcher(CompositeTask):
3837
3838         __slots__ = ("pkg",) + \
3839                 ("pkg_path", "_bintree",)
3840
3841         def _start(self):
3842                 self._bintree = self.pkg.root_config.trees["bintree"]
3843                 fetcher = BinpkgFetcher(background=self.background,
3844                         logfile=self.scheduler.fetch.log_file, pkg=self.pkg,
3845                         scheduler=self.scheduler)
3846                 self.pkg_path = fetcher.pkg_path
3847                 self._start_task(fetcher, self._fetcher_exit)
3848
3849         def _fetcher_exit(self, fetcher):
3850
3851                 if self._default_exit(fetcher) != os.EX_OK:
3852                         self.wait()
3853                         return
3854
3855                 verifier = BinpkgVerifier(background=self.background,
3856                         logfile=self.scheduler.fetch.log_file, pkg=self.pkg)
3857                 self._start_task(verifier, self._verifier_exit)
3858
3859         def _verifier_exit(self, verifier):
3860                 if self._default_exit(verifier) != os.EX_OK:
3861                         self.wait()
3862                         return
3863
3864                 self._bintree.inject(self.pkg.cpv, filename=self.pkg_path)
3865
3866                 self._current_task = None
3867                 self.returncode = os.EX_OK
3868                 self.wait()
3869
3870 class BinpkgExtractorAsync(SpawnProcess):
3871
3872         __slots__ = ("image_dir", "pkg", "pkg_path")
3873
3874         _shell_binary = portage.const.BASH_BINARY
3875
3876         def _start(self):
3877                 self.args = [self._shell_binary, "-c",
3878                         "bzip2 -dqc -- %s | tar -xp -C %s -f -" % \
3879                         (portage._shell_quote(self.pkg_path),
3880                         portage._shell_quote(self.image_dir))]
3881
3882                 self.env = self.pkg.root_config.settings.environ()
3883                 SpawnProcess._start(self)
3884
3885 class MergeListItem(CompositeTask):
3886
3887         """
3888         TODO: For parallel scheduling, everything here needs asynchronous
3889         execution support (start, poll, and wait methods).
3890         """
3891
3892         __slots__ = ("args_set",
3893                 "binpkg_opts", "build_opts", "config_pool", "emerge_opts",
3894                 "find_blockers", "logger", "mtimedb", "pkg",
3895                 "pkg_count", "pkg_to_replace", "prefetcher",
3896                 "settings", "statusMessage", "world_atom") + \
3897                 ("_install_task",)
3898
3899         def _start(self):
3900
3901                 pkg = self.pkg
3902                 build_opts = self.build_opts
3903
3904                 if pkg.installed:
3905                         # uninstall,  executed by self.merge()
3906                         self.returncode = os.EX_OK
3907                         self.wait()
3908                         return
3909
3910                 args_set = self.args_set
3911                 find_blockers = self.find_blockers
3912                 logger = self.logger
3913                 mtimedb = self.mtimedb
3914                 pkg_count = self.pkg_count
3915                 scheduler = self.scheduler
3916                 settings = self.settings
3917                 world_atom = self.world_atom
3918                 ldpath_mtimes = mtimedb["ldpath"]
3919
3920                 action_desc = "Emerging"
3921                 preposition = "for"
3922                 if pkg.type_name == "binary":
3923                         action_desc += " binary"
3924
3925                 if build_opts.fetchonly:
3926                         action_desc = "Fetching"
3927
3928                 msg = "%s (%s of %s) %s" % \
3929                         (action_desc,
3930                         colorize("MERGE_LIST_PROGRESS", str(pkg_count.curval)),
3931                         colorize("MERGE_LIST_PROGRESS", str(pkg_count.maxval)),
3932                         colorize("GOOD", pkg.cpv))
3933
3934                 portdb = pkg.root_config.trees["porttree"].dbapi
3935                 portdir_repo_name = portdb._repository_map.get(portdb.porttree_root)
3936                 if portdir_repo_name:
3937                         pkg_repo_name = pkg.metadata.get("repository")
3938                         if pkg_repo_name != portdir_repo_name:
3939                                 if not pkg_repo_name:
3940                                         pkg_repo_name = "unknown repo"
3941                                 msg += " from %s" % pkg_repo_name
3942
3943                 if pkg.root != "/":
3944                         msg += " %s %s" % (preposition, pkg.root)
3945
3946                 if not build_opts.pretend:
3947                         self.statusMessage(msg)
3948                         logger.log(" >>> emerge (%s of %s) %s to %s" % \
3949                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
3950
3951                 if pkg.type_name == "ebuild":
3952
3953                         build = EbuildBuild(args_set=args_set,
3954                                 background=self.background,
3955                                 config_pool=self.config_pool,
3956                                 find_blockers=find_blockers,
3957                                 ldpath_mtimes=ldpath_mtimes, logger=logger,
3958                                 opts=build_opts, pkg=pkg, pkg_count=pkg_count,
3959                                 prefetcher=self.prefetcher, scheduler=scheduler,
3960                                 settings=settings, world_atom=world_atom)
3961
3962                         self._install_task = build
3963                         self._start_task(build, self._default_final_exit)
3964                         return
3965
3966                 elif pkg.type_name == "binary":
3967
3968                         binpkg = Binpkg(background=self.background,
3969                                 find_blockers=find_blockers,
3970                                 ldpath_mtimes=ldpath_mtimes, logger=logger,
3971                                 opts=self.binpkg_opts, pkg=pkg, pkg_count=pkg_count,
3972                                 prefetcher=self.prefetcher, settings=settings,
3973                                 scheduler=scheduler, world_atom=world_atom)
3974
3975                         self._install_task = binpkg
3976                         self._start_task(binpkg, self._default_final_exit)
3977                         return
3978
3979         def _poll(self):
3980                 self._install_task.poll()
3981                 return self.returncode
3982
3983         def _wait(self):
3984                 self._install_task.wait()
3985                 return self.returncode
3986
3987         def merge(self):
3988
3989                 pkg = self.pkg
3990                 build_opts = self.build_opts
3991                 find_blockers = self.find_blockers
3992                 logger = self.logger
3993                 mtimedb = self.mtimedb
3994                 pkg_count = self.pkg_count
3995                 prefetcher = self.prefetcher
3996                 scheduler = self.scheduler
3997                 settings = self.settings
3998                 world_atom = self.world_atom
3999                 ldpath_mtimes = mtimedb["ldpath"]
4000
4001                 if pkg.installed:
4002                         if not (build_opts.buildpkgonly or \
4003                                 build_opts.fetchonly or build_opts.pretend):
4004
4005                                 uninstall = PackageUninstall(background=self.background,
4006                                         ldpath_mtimes=ldpath_mtimes, opts=self.emerge_opts,
4007                                         pkg=pkg, scheduler=scheduler, settings=settings)
4008
4009                                 uninstall.start()
4010                                 retval = uninstall.wait()
4011                                 if retval != os.EX_OK:
4012                                         return retval
4013                         return os.EX_OK
4014
4015                 if build_opts.fetchonly or \
4016                         build_opts.buildpkgonly:
4017                         return self.returncode
4018
4019                 retval = self._install_task.install()
4020                 return retval
4021
4022 class PackageMerge(AsynchronousTask):
4023         """
4024         TODO: Implement asynchronous merge so that the scheduler can
4025         run while a merge is executing.
4026         """
4027
4028         __slots__ = ("merge",)
4029
4030         def _start(self):
4031
4032                 pkg = self.merge.pkg
4033                 pkg_count = self.merge.pkg_count
4034
4035                 if pkg.installed:
4036                         action_desc = "Uninstalling"
4037                         preposition = "from"
4038                 else:
4039                         action_desc = "Installing"
4040                         preposition = "to"
4041
4042                 msg = "%s %s" % (action_desc, colorize("GOOD", pkg.cpv))
4043
4044                 if pkg.root != "/":
4045                         msg += " %s %s" % (preposition, pkg.root)
4046
4047                 if not self.merge.build_opts.fetchonly and \
4048                         not self.merge.build_opts.pretend and \
4049                         not self.merge.build_opts.buildpkgonly:
4050                         self.merge.statusMessage(msg)
4051
4052                 self.returncode = self.merge.merge()
4053                 self.wait()
4054
4055 class DependencyArg(object):
4056         def __init__(self, arg=None, root_config=None):
4057                 self.arg = arg
4058                 self.root_config = root_config
4059
4060         def __str__(self):
4061                 return str(self.arg)
4062
4063 class AtomArg(DependencyArg):
4064         def __init__(self, atom=None, **kwargs):
4065                 DependencyArg.__init__(self, **kwargs)
4066                 self.atom = atom
4067                 if not isinstance(self.atom, portage.dep.Atom):
4068                         self.atom = portage.dep.Atom(self.atom)
4069                 self.set = (self.atom, )
4070
4071 class PackageArg(DependencyArg):
4072         def __init__(self, package=None, **kwargs):
4073                 DependencyArg.__init__(self, **kwargs)
4074                 self.package = package
4075                 self.atom = portage.dep.Atom("=" + package.cpv)
4076                 self.set = (self.atom, )
4077
4078 class SetArg(DependencyArg):
4079         def __init__(self, set=None, **kwargs):
4080                 DependencyArg.__init__(self, **kwargs)
4081                 self.set = set
4082                 self.name = self.arg[len(SETPREFIX):]
4083
4084 class Dependency(SlotObject):
4085         __slots__ = ("atom", "blocker", "depth",
4086                 "parent", "onlydeps", "priority", "root")
4087         def __init__(self, **kwargs):
4088                 SlotObject.__init__(self, **kwargs)
4089                 if self.priority is None:
4090                         self.priority = DepPriority()
4091                 if self.depth is None:
4092                         self.depth = 0
4093
4094 class BlockerCache(portage.cache.mappings.MutableMapping):
4095         """This caches blockers of installed packages so that dep_check does not
4096         have to be done for every single installed package on every invocation of
4097         emerge.  The cache is invalidated whenever it is detected that something
4098         has changed that might alter the results of dep_check() calls:
4099                 1) the set of installed packages (including COUNTER) has changed
4100                 2) the old-style virtuals have changed
4101         """
4102
4103         # Number of uncached packages to trigger cache update, since
4104         # it's wasteful to update it for every vdb change.
4105         _cache_threshold = 5
4106
4107         class BlockerData(object):
4108
4109                 __slots__ = ("__weakref__", "atoms", "counter")
4110
4111                 def __init__(self, counter, atoms):
4112                         self.counter = counter
4113                         self.atoms = atoms
4114
4115         def __init__(self, myroot, vardb):
4116                 self._vardb = vardb
4117                 self._virtuals = vardb.settings.getvirtuals()
4118                 self._cache_filename = os.path.join(myroot,
4119                         portage.CACHE_PATH.lstrip(os.path.sep), "vdb_blockers.pickle")
4120                 self._cache_version = "1"
4121                 self._cache_data = None
4122                 self._modified = set()
4123                 self._load()
4124
4125         def _load(self):
4126                 try:
4127                         f = open(self._cache_filename, mode='rb')
4128                         mypickle = pickle.Unpickler(f)
4129                         try:
4130                                 mypickle.find_global = None
4131                         except AttributeError:
4132                                 # TODO: If py3k, override Unpickler.find_class().
4133                                 pass
4134                         self._cache_data = mypickle.load()
4135                         f.close()
4136                         del f
4137                 except (IOError, OSError, EOFError, ValueError, pickle.UnpicklingError), e:
4138                         if isinstance(e, pickle.UnpicklingError):
4139                                 writemsg("!!! Error loading '%s': %s\n" % \
4140                                         (self._cache_filename, str(e)), noiselevel=-1)
4141                         del e
4142
4143                 cache_valid = self._cache_data and \
4144                         isinstance(self._cache_data, dict) and \
4145                         self._cache_data.get("version") == self._cache_version and \
4146                         isinstance(self._cache_data.get("blockers"), dict)
4147                 if cache_valid:
4148                         # Validate all the atoms and counters so that
4149                         # corruption is detected as soon as possible.
4150                         invalid_items = set()
4151                         for k, v in self._cache_data["blockers"].iteritems():
4152                                 if not isinstance(k, basestring):
4153                                         invalid_items.add(k)
4154                                         continue
4155                                 try:
4156                                         if portage.catpkgsplit(k) is None:
4157                                                 invalid_items.add(k)
4158                                                 continue
4159                                 except portage.exception.InvalidData:
4160                                         invalid_items.add(k)
4161                                         continue
4162                                 if not isinstance(v, tuple) or \
4163                                         len(v) != 2:
4164                                         invalid_items.add(k)
4165                                         continue
4166                                 counter, atoms = v
4167                                 if not isinstance(counter, (int, long)):
4168                                         invalid_items.add(k)
4169                                         continue
4170                                 if not isinstance(atoms, (list, tuple)):
4171                                         invalid_items.add(k)
4172                                         continue
4173                                 invalid_atom = False
4174                                 for atom in atoms:
4175                                         if not isinstance(atom, basestring):
4176                                                 invalid_atom = True
4177                                                 break
4178                                         if atom[:1] != "!" or \
4179                                                 not portage.isvalidatom(
4180                                                 atom, allow_blockers=True):
4181                                                 invalid_atom = True
4182                                                 break
4183                                 if invalid_atom:
4184                                         invalid_items.add(k)
4185                                         continue
4186
4187                         for k in invalid_items:
4188                                 del self._cache_data["blockers"][k]
4189                         if not self._cache_data["blockers"]:
4190                                 cache_valid = False
4191
4192                 if not cache_valid:
4193                         self._cache_data = {"version":self._cache_version}
4194                         self._cache_data["blockers"] = {}
4195                         self._cache_data["virtuals"] = self._virtuals
4196                 self._modified.clear()
4197
4198         def flush(self):
4199                 """If the current user has permission and the internal blocker cache
4200                 been updated, save it to disk and mark it unmodified.  This is called
4201                 by emerge after it has proccessed blockers for all installed packages.
4202                 Currently, the cache is only written if the user has superuser
4203                 privileges (since that's required to obtain a lock), but all users
4204                 have read access and benefit from faster blocker lookups (as long as
4205                 the entire cache is still valid).  The cache is stored as a pickled
4206                 dict object with the following format:
4207
4208                 {
4209                         version : "1",
4210                         "blockers" : {cpv1:(counter,(atom1, atom2...)), cpv2...},
4211                         "virtuals" : vardb.settings.getvirtuals()
4212                 }
4213                 """
4214                 if len(self._modified) >= self._cache_threshold and \
4215                         secpass >= 2:
4216                         try:
4217                                 f = portage.util.atomic_ofstream(self._cache_filename, mode='wb')
4218                                 pickle.dump(self._cache_data, f, protocol=2)
4219                                 f.close()
4220                                 portage.util.apply_secpass_permissions(
4221                                         self._cache_filename, gid=portage.portage_gid, mode=0644)
4222                         except (IOError, OSError), e:
4223                                 pass
4224                         self._modified.clear()
4225
4226         def __setitem__(self, cpv, blocker_data):
4227                 """
4228                 Update the cache and mark it as modified for a future call to
4229                 self.flush().
4230
4231                 @param cpv: Package for which to cache blockers.
4232                 @type cpv: String
4233                 @param blocker_data: An object with counter and atoms attributes.
4234                 @type blocker_data: BlockerData
4235                 """
4236                 self._cache_data["blockers"][cpv] = \
4237                         (blocker_data.counter, tuple(str(x) for x in blocker_data.atoms))
4238                 self._modified.add(cpv)
4239
4240         def __iter__(self):
4241                 if self._cache_data is None:
4242                         # triggered by python-trace
4243                         return iter([])
4244                 return iter(self._cache_data["blockers"])
4245
4246         def __delitem__(self, cpv):
4247                 del self._cache_data["blockers"][cpv]
4248
4249         def __getitem__(self, cpv):
4250                 """
4251                 @rtype: BlockerData
4252                 @returns: An object with counter and atoms attributes.
4253                 """
4254                 return self.BlockerData(*self._cache_data["blockers"][cpv])
4255
4256 class BlockerDB(object):
4257
4258         def __init__(self, root_config):
4259                 self._root_config = root_config
4260                 self._vartree = root_config.trees["vartree"]
4261                 self._portdb = root_config.trees["porttree"].dbapi
4262
4263                 self._dep_check_trees = None
4264                 self._fake_vartree = None
4265
4266         def _get_fake_vartree(self, acquire_lock=0):
4267                 fake_vartree = self._fake_vartree
4268                 if fake_vartree is None:
4269                         fake_vartree = FakeVartree(self._root_config,
4270                                 acquire_lock=acquire_lock)
4271                         self._fake_vartree = fake_vartree
4272                         self._dep_check_trees = { self._vartree.root : {
4273                                 "porttree"    :  fake_vartree,
4274                                 "vartree"     :  fake_vartree,
4275                         }}
4276                 else:
4277                         fake_vartree.sync(acquire_lock=acquire_lock)
4278                 return fake_vartree
4279
4280         def findInstalledBlockers(self, new_pkg, acquire_lock=0):
4281                 blocker_cache = BlockerCache(self._vartree.root, self._vartree.dbapi)
4282                 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
4283                 settings = self._vartree.settings
4284                 stale_cache = set(blocker_cache)
4285                 fake_vartree = self._get_fake_vartree(acquire_lock=acquire_lock)
4286                 dep_check_trees = self._dep_check_trees
4287                 vardb = fake_vartree.dbapi
4288                 installed_pkgs = list(vardb)
4289
4290                 for inst_pkg in installed_pkgs:
4291                         stale_cache.discard(inst_pkg.cpv)
4292                         cached_blockers = blocker_cache.get(inst_pkg.cpv)
4293                         if cached_blockers is not None and \
4294                                 cached_blockers.counter != long(inst_pkg.metadata["COUNTER"]):
4295                                 cached_blockers = None
4296                         if cached_blockers is not None:
4297                                 blocker_atoms = cached_blockers.atoms
4298                         else:
4299                                 # Use aux_get() to trigger FakeVartree global
4300                                 # updates on *DEPEND when appropriate.
4301                                 depstr = " ".join(vardb.aux_get(inst_pkg.cpv, dep_keys))
4302                                 try:
4303                                         portage.dep._dep_check_strict = False
4304                                         success, atoms = portage.dep_check(depstr,
4305                                                 vardb, settings, myuse=inst_pkg.use.enabled,
4306                                                 trees=dep_check_trees, myroot=inst_pkg.root)
4307                                 finally:
4308                                         portage.dep._dep_check_strict = True
4309                                 if not success:
4310                                         pkg_location = os.path.join(inst_pkg.root,
4311                                                 portage.VDB_PATH, inst_pkg.category, inst_pkg.pf)
4312                                         portage.writemsg("!!! %s/*DEPEND: %s\n" % \
4313                                                 (pkg_location, atoms), noiselevel=-1)
4314                                         continue
4315
4316                                 blocker_atoms = [atom for atom in atoms \
4317                                         if atom.startswith("!")]
4318                                 blocker_atoms.sort()
4319                                 counter = long(inst_pkg.metadata["COUNTER"])
4320                                 blocker_cache[inst_pkg.cpv] = \
4321                                         blocker_cache.BlockerData(counter, blocker_atoms)
4322                 for cpv in stale_cache:
4323                         del blocker_cache[cpv]
4324                 blocker_cache.flush()
4325
4326                 blocker_parents = digraph()
4327                 blocker_atoms = []
4328                 for pkg in installed_pkgs:
4329                         for blocker_atom in blocker_cache[pkg.cpv].atoms:
4330                                 blocker_atom = blocker_atom.lstrip("!")
4331                                 blocker_atoms.append(blocker_atom)
4332                                 blocker_parents.add(blocker_atom, pkg)
4333
4334                 blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
4335                 blocking_pkgs = set()
4336                 for atom in blocker_atoms.iterAtomsForPackage(new_pkg):
4337                         blocking_pkgs.update(blocker_parents.parent_nodes(atom))
4338
4339                 # Check for blockers in the other direction.
4340                 depstr = " ".join(new_pkg.metadata[k] for k in dep_keys)
4341                 try:
4342                         portage.dep._dep_check_strict = False
4343                         success, atoms = portage.dep_check(depstr,
4344                                 vardb, settings, myuse=new_pkg.use.enabled,
4345                                 trees=dep_check_trees, myroot=new_pkg.root)
4346                 finally:
4347                         portage.dep._dep_check_strict = True
4348                 if not success:
4349                         # We should never get this far with invalid deps.
4350                         show_invalid_depstring_notice(new_pkg, depstr, atoms)
4351                         assert False
4352
4353                 blocker_atoms = [atom.lstrip("!") for atom in atoms \
4354                         if atom[:1] == "!"]
4355                 if blocker_atoms:
4356                         blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
4357                         for inst_pkg in installed_pkgs:
4358                                 try:
4359                                         blocker_atoms.iterAtomsForPackage(inst_pkg).next()
4360                                 except (portage.exception.InvalidDependString, StopIteration):
4361                                         continue
4362                                 blocking_pkgs.add(inst_pkg)
4363
4364                 return blocking_pkgs
4365
4366 def show_invalid_depstring_notice(parent_node, depstring, error_msg):
4367
4368         msg1 = "\n\n!!! Invalid or corrupt dependency specification: " + \
4369                 "\n\n%s\n\n%s\n\n%s\n\n" % (error_msg, parent_node, depstring)
4370         p_type, p_root, p_key, p_status = parent_node
4371         msg = []
4372         if p_status == "nomerge":
4373                 category, pf = portage.catsplit(p_key)
4374                 pkg_location = os.path.join(p_root, portage.VDB_PATH, category, pf)
4375                 msg.append("Portage is unable to process the dependencies of the ")
4376                 msg.append("'%s' package. " % p_key)
4377                 msg.append("In order to correct this problem, the package ")
4378                 msg.append("should be uninstalled, reinstalled, or upgraded. ")
4379                 msg.append("As a temporary workaround, the --nodeps option can ")
4380                 msg.append("be used to ignore all dependencies.  For reference, ")
4381                 msg.append("the problematic dependencies can be found in the ")
4382                 msg.append("*DEPEND files located in '%s/'." % pkg_location)
4383         else:
4384                 msg.append("This package can not be installed. ")
4385                 msg.append("Please notify the '%s' package maintainer " % p_key)
4386                 msg.append("about this problem.")
4387
4388         msg2 = "".join("%s\n" % line for line in textwrap.wrap("".join(msg), 72))
4389         writemsg_level(msg1 + msg2, level=logging.ERROR, noiselevel=-1)
4390
4391 class PackageVirtualDbapi(portage.dbapi):
4392         """
4393         A dbapi-like interface class that represents the state of the installed
4394         package database as new packages are installed, replacing any packages
4395         that previously existed in the same slot. The main difference between
4396         this class and fakedbapi is that this one uses Package instances
4397         internally (passed in via cpv_inject() and cpv_remove() calls).
4398         """
4399         def __init__(self, settings):
4400                 portage.dbapi.__init__(self)
4401                 self.settings = settings
4402                 self._match_cache = {}
4403                 self._cp_map = {}
4404                 self._cpv_map = {}
4405
4406         def clear(self):
4407                 """
4408                 Remove all packages.
4409                 """
4410                 if self._cpv_map:
4411                         self._clear_cache()
4412                         self._cp_map.clear()
4413                         self._cpv_map.clear()
4414
4415         def copy(self):
4416                 obj = PackageVirtualDbapi(self.settings)
4417                 obj._match_cache = self._match_cache.copy()
4418                 obj._cp_map = self._cp_map.copy()
4419                 for k, v in obj._cp_map.iteritems():
4420                         obj._cp_map[k] = v[:]
4421                 obj._cpv_map = self._cpv_map.copy()
4422                 return obj
4423
4424         def __iter__(self):
4425                 return self._cpv_map.itervalues()
4426
4427         def __contains__(self, item):
4428                 existing = self._cpv_map.get(item.cpv)
4429                 if existing is not None and \
4430                         existing == item:
4431                         return True
4432                 return False
4433
4434         def get(self, item, default=None):
4435                 cpv = getattr(item, "cpv", None)
4436                 if cpv is None:
4437                         if len(item) != 4:
4438                                 return default
4439                         type_name, root, cpv, operation = item
4440
4441                 existing = self._cpv_map.get(cpv)
4442                 if existing is not None and \
4443                         existing == item:
4444                         return existing
4445                 return default
4446
4447         def match_pkgs(self, atom):
4448                 return [self._cpv_map[cpv] for cpv in self.match(atom)]
4449
4450         def _clear_cache(self):
4451                 if self._categories is not None:
4452                         self._categories = None
4453                 if self._match_cache:
4454                         self._match_cache = {}
4455
4456         def match(self, origdep, use_cache=1):
4457                 result = self._match_cache.get(origdep)
4458                 if result is not None:
4459                         return result[:]
4460                 result = portage.dbapi.match(self, origdep, use_cache=use_cache)
4461                 self._match_cache[origdep] = result
4462                 return result[:]
4463
4464         def cpv_exists(self, cpv):
4465                 return cpv in self._cpv_map
4466
4467         def cp_list(self, mycp, use_cache=1):
4468                 cachelist = self._match_cache.get(mycp)
4469                 # cp_list() doesn't expand old-style virtuals
4470                 if cachelist and cachelist[0].startswith(mycp):
4471                         return cachelist[:]
4472                 cpv_list = self._cp_map.get(mycp)
4473                 if cpv_list is None:
4474                         cpv_list = []
4475                 else:
4476                         cpv_list = [pkg.cpv for pkg in cpv_list]
4477                 self._cpv_sort_ascending(cpv_list)
4478                 if not (not cpv_list and mycp.startswith("virtual/")):
4479                         self._match_cache[mycp] = cpv_list
4480                 return cpv_list[:]
4481
4482         def cp_all(self):
4483                 return list(self._cp_map)
4484
4485         def cpv_all(self):
4486                 return list(self._cpv_map)
4487
4488         def cpv_inject(self, pkg):
4489                 cp_list = self._cp_map.get(pkg.cp)
4490                 if cp_list is None:
4491                         cp_list = []
4492                         self._cp_map[pkg.cp] = cp_list
4493                 e_pkg = self._cpv_map.get(pkg.cpv)
4494                 if e_pkg is not None:
4495                         if e_pkg == pkg:
4496                                 return
4497                         self.cpv_remove(e_pkg)
4498                 for e_pkg in cp_list:
4499                         if e_pkg.slot_atom == pkg.slot_atom:
4500                                 if e_pkg == pkg:
4501                                         return
4502                                 self.cpv_remove(e_pkg)
4503                                 break
4504                 cp_list.append(pkg)
4505                 self._cpv_map[pkg.cpv] = pkg
4506                 self._clear_cache()
4507
4508         def cpv_remove(self, pkg):
4509                 old_pkg = self._cpv_map.get(pkg.cpv)
4510                 if old_pkg != pkg:
4511                         raise KeyError(pkg)
4512                 self._cp_map[pkg.cp].remove(pkg)
4513                 del self._cpv_map[pkg.cpv]
4514                 self._clear_cache()
4515
4516         def aux_get(self, cpv, wants):
4517                 metadata = self._cpv_map[cpv].metadata
4518                 return [metadata.get(x, "") for x in wants]
4519
4520         def aux_update(self, cpv, values):
4521                 self._cpv_map[cpv].metadata.update(values)
4522                 self._clear_cache()
4523
4524 class depgraph(object):
4525
4526         pkg_tree_map = RootConfig.pkg_tree_map
4527
4528         _dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
4529
4530         def __init__(self, settings, trees, myopts, myparams, spinner):
4531                 self.settings = settings
4532                 self.target_root = settings["ROOT"]
4533                 self.myopts = myopts
4534                 self.myparams = myparams
4535                 self.edebug = 0
4536                 if settings.get("PORTAGE_DEBUG", "") == "1":
4537                         self.edebug = 1
4538                 self.spinner = spinner
4539                 self._running_root = trees["/"]["root_config"]
4540                 self._opts_no_restart = Scheduler._opts_no_restart
4541                 self.pkgsettings = {}
4542                 # Maps slot atom to package for each Package added to the graph.
4543                 self._slot_pkg_map = {}
4544                 # Maps nodes to the reasons they were selected for reinstallation.
4545                 self._reinstall_nodes = {}
4546                 self.mydbapi = {}
4547                 self.trees = {}
4548                 self._trees_orig = trees
4549                 self.roots = {}
4550                 # Contains a filtered view of preferred packages that are selected
4551                 # from available repositories.
4552                 self._filtered_trees = {}
4553                 # Contains installed packages and new packages that have been added
4554                 # to the graph.
4555                 self._graph_trees = {}
4556                 # All Package instances
4557                 self._pkg_cache = {}
4558                 for myroot in trees:
4559                         self.trees[myroot] = {}
4560                         # Create a RootConfig instance that references
4561                         # the FakeVartree instead of the real one.
4562                         self.roots[myroot] = RootConfig(
4563                                 trees[myroot]["vartree"].settings,
4564                                 self.trees[myroot],
4565                                 trees[myroot]["root_config"].setconfig)
4566                         for tree in ("porttree", "bintree"):
4567                                 self.trees[myroot][tree] = trees[myroot][tree]
4568                         self.trees[myroot]["vartree"] = \
4569                                 FakeVartree(trees[myroot]["root_config"],
4570                                         pkg_cache=self._pkg_cache)
4571                         self.pkgsettings[myroot] = portage.config(
4572                                 clone=self.trees[myroot]["vartree"].settings)
4573                         self._slot_pkg_map[myroot] = {}
4574                         vardb = self.trees[myroot]["vartree"].dbapi
4575                         preload_installed_pkgs = "--nodeps" not in self.myopts and \
4576                                 "--buildpkgonly" not in self.myopts
4577                         # This fakedbapi instance will model the state that the vdb will
4578                         # have after new packages have been installed.
4579                         fakedb = PackageVirtualDbapi(vardb.settings)
4580                         if preload_installed_pkgs:
4581                                 for pkg in vardb:
4582                                         self.spinner.update()
4583                                         # This triggers metadata updates via FakeVartree.
4584                                         vardb.aux_get(pkg.cpv, [])
4585                                         fakedb.cpv_inject(pkg)
4586
4587                         # Now that the vardb state is cached in our FakeVartree,
4588                         # we won't be needing the real vartree cache for awhile.
4589                         # To make some room on the heap, clear the vardbapi
4590                         # caches.
4591                         trees[myroot]["vartree"].dbapi._clear_cache()
4592                         gc.collect()
4593
4594                         self.mydbapi[myroot] = fakedb
4595                         def graph_tree():
4596                                 pass
4597                         graph_tree.dbapi = fakedb
4598                         self._graph_trees[myroot] = {}
4599                         self._filtered_trees[myroot] = {}
4600                         # Substitute the graph tree for the vartree in dep_check() since we
4601                         # want atom selections to be consistent with package selections
4602                         # have already been made.
4603                         self._graph_trees[myroot]["porttree"]   = graph_tree
4604                         self._graph_trees[myroot]["vartree"]    = graph_tree
4605                         def filtered_tree():
4606                                 pass
4607                         filtered_tree.dbapi = self._dep_check_composite_db(self, myroot)
4608                         self._filtered_trees[myroot]["porttree"] = filtered_tree
4609
4610                         # Passing in graph_tree as the vartree here could lead to better
4611                         # atom selections in some cases by causing atoms for packages that
4612                         # have been added to the graph to be preferred over other choices.
4613                         # However, it can trigger atom selections that result in
4614                         # unresolvable direct circular dependencies. For example, this
4615                         # happens with gwydion-dylan which depends on either itself or
4616                         # gwydion-dylan-bin. In case gwydion-dylan is not yet installed,
4617                         # gwydion-dylan-bin needs to be selected in order to avoid a
4618                         # an unresolvable direct circular dependency.
4619                         #
4620                         # To solve the problem described above, pass in "graph_db" so that
4621                         # packages that have been added to the graph are distinguishable
4622                         # from other available packages and installed packages. Also, pass
4623                         # the parent package into self._select_atoms() calls so that
4624                         # unresolvable direct circular dependencies can be detected and
4625                         # avoided when possible.
4626                         self._filtered_trees[myroot]["graph_db"] = graph_tree.dbapi
4627                         self._filtered_trees[myroot]["vartree"] = self.trees[myroot]["vartree"]
4628
4629                         dbs = []
4630                         portdb = self.trees[myroot]["porttree"].dbapi
4631                         bindb  = self.trees[myroot]["bintree"].dbapi
4632                         vardb  = self.trees[myroot]["vartree"].dbapi
4633                         #               (db, pkg_type, built, installed, db_keys)
4634                         if "--usepkgonly" not in self.myopts:
4635                                 db_keys = list(portdb._aux_cache_keys)
4636                                 dbs.append((portdb, "ebuild", False, False, db_keys))
4637                         if "--usepkg" in self.myopts:
4638                                 db_keys = list(bindb._aux_cache_keys)
4639                                 dbs.append((bindb,  "binary", True, False, db_keys))
4640                         db_keys = list(trees[myroot]["vartree"].dbapi._aux_cache_keys)
4641                         dbs.append((vardb, "installed", True, True, db_keys))
4642                         self._filtered_trees[myroot]["dbs"] = dbs
4643                         if "--usepkg" in self.myopts:
4644                                 self.trees[myroot]["bintree"].populate(
4645                                         "--getbinpkg" in self.myopts,
4646                                         "--getbinpkgonly" in self.myopts)
4647                 del trees
4648
4649                 self.digraph=portage.digraph()
4650                 # contains all sets added to the graph
4651                 self._sets = {}
4652                 # contains atoms given as arguments
4653                 self._sets["args"] = InternalPackageSet()
4654                 # contains all atoms from all sets added to the graph, including
4655                 # atoms given as arguments
4656                 self._set_atoms = InternalPackageSet()
4657                 self._atom_arg_map = {}
4658                 # contains all nodes pulled in by self._set_atoms
4659                 self._set_nodes = set()
4660                 # Contains only Blocker -> Uninstall edges
4661                 self._blocker_uninstalls = digraph()
4662                 # Contains only Package -> Blocker edges
4663                 self._blocker_parents = digraph()
4664                 # Contains only irrelevant Package -> Blocker edges
4665                 self._irrelevant_blockers = digraph()
4666                 # Contains only unsolvable Package -> Blocker edges
4667                 self._unsolvable_blockers = digraph()
4668                 # Contains all Blocker -> Blocked Package edges
4669                 self._blocked_pkgs = digraph()
4670                 # Contains world packages that have been protected from
4671                 # uninstallation but may not have been added to the graph
4672                 # if the graph is not complete yet.
4673                 self._blocked_world_pkgs = {}
4674                 self._slot_collision_info = {}
4675                 # Slot collision nodes are not allowed to block other packages since
4676                 # blocker validation is only able to account for one package per slot.
4677                 self._slot_collision_nodes = set()
4678                 self._parent_atoms = {}
4679                 self._slot_conflict_parent_atoms = set()
4680                 self._serialized_tasks_cache = None
4681                 self._scheduler_graph = None
4682                 self._displayed_list = None
4683                 self._pprovided_args = []
4684                 self._missing_args = []
4685                 self._masked_installed = set()
4686                 self._unsatisfied_deps_for_display = []
4687                 self._unsatisfied_blockers_for_display = None
4688                 self._circular_deps_for_display = None
4689                 self._dep_stack = []
4690                 self._unsatisfied_deps = []
4691                 self._initially_unsatisfied_deps = []
4692                 self._ignored_deps = []
4693                 self._required_set_names = set(["system", "world"])
4694                 self._select_atoms = self._select_atoms_highest_available
4695                 self._select_package = self._select_pkg_highest_available
4696                 self._highest_pkg_cache = {}
4697
4698         def _show_slot_collision_notice(self):
4699                 """Show an informational message advising the user to mask one of the
4700                 the packages. In some cases it may be possible to resolve this
4701                 automatically, but support for backtracking (removal nodes that have
4702                 already been selected) will be required in order to handle all possible
4703                 cases.
4704                 """
4705
4706                 if not self._slot_collision_info:
4707                         return
4708
4709                 self._show_merge_list()
4710
4711                 msg = []
4712                 msg.append("\n!!! Multiple package instances within a single " + \
4713                         "package slot have been pulled\n")
4714                 msg.append("!!! into the dependency graph, resulting" + \
4715                         " in a slot conflict:\n\n")
4716                 indent = "  "
4717                 # Max number of parents shown, to avoid flooding the display.
4718                 max_parents = 3
4719                 explanation_columns = 70
4720                 explanations = 0
4721                 for (slot_atom, root), slot_nodes \
4722                         in self._slot_collision_info.iteritems():
4723                         msg.append(str(slot_atom))
4724                         msg.append("\n\n")
4725
4726                         for node in slot_nodes:
4727                                 msg.append(indent)
4728                                 msg.append(str(node))
4729                                 parent_atoms = self._parent_atoms.get(node)
4730                                 if parent_atoms:
4731                                         pruned_list = set()
4732                                         # Prefer conflict atoms over others.
4733                                         for parent_atom in parent_atoms:
4734                                                 if len(pruned_list) >= max_parents:
4735                                                         break
4736                                                 if parent_atom in self._slot_conflict_parent_atoms:
4737                                                         pruned_list.add(parent_atom)
4738
4739                                         # If this package was pulled in by conflict atoms then
4740                                         # show those alone since those are the most interesting.
4741                                         if not pruned_list:
4742                                                 # When generating the pruned list, prefer instances
4743                                                 # of DependencyArg over instances of Package.
4744                                                 for parent_atom in parent_atoms:
4745                                                         if len(pruned_list) >= max_parents:
4746                                                                 break
4747                                                         parent, atom = parent_atom
4748                                                         if isinstance(parent, DependencyArg):
4749                                                                 pruned_list.add(parent_atom)
4750                                                 # Prefer Packages instances that themselves have been
4751                                                 # pulled into collision slots.
4752                                                 for parent_atom in parent_atoms:
4753                                                         if len(pruned_list) >= max_parents:
4754                                                                 break
4755                                                         parent, atom = parent_atom
4756                                                         if isinstance(parent, Package) and \
4757                                                                 (parent.slot_atom, parent.root) \
4758                                                                 in self._slot_collision_info:
4759                                                                 pruned_list.add(parent_atom)
4760                                                 for parent_atom in parent_atoms:
4761                                                         if len(pruned_list) >= max_parents:
4762                                                                 break
4763                                                         pruned_list.add(parent_atom)
4764                                         omitted_parents = len(parent_atoms) - len(pruned_list)
4765                                         parent_atoms = pruned_list
4766                                         msg.append(" pulled in by\n")
4767                                         for parent_atom in parent_atoms:
4768                                                 parent, atom = parent_atom
4769                                                 msg.append(2*indent)
4770                                                 if isinstance(parent,
4771                                                         (PackageArg, AtomArg)):
4772                                                         # For PackageArg and AtomArg types, it's
4773                                                         # redundant to display the atom attribute.
4774                                                         msg.append(str(parent))
4775                                                 else:
4776                                                         # Display the specific atom from SetArg or
4777                                                         # Package types.
4778                                                         msg.append("%s required by %s" % (atom, parent))
4779                                                 msg.append("\n")
4780                                         if omitted_parents:
4781                                                 msg.append(2*indent)
4782                                                 msg.append("(and %d more)\n" % omitted_parents)
4783                                 else:
4784                                         msg.append(" (no parents)\n")
4785                                 msg.append("\n")
4786                         explanation = self._slot_conflict_explanation(slot_nodes)
4787                         if explanation:
4788                                 explanations += 1
4789                                 msg.append(indent + "Explanation:\n\n")
4790                                 for line in textwrap.wrap(explanation, explanation_columns):
4791                                         msg.append(2*indent + line + "\n")
4792                                 msg.append("\n")
4793                 msg.append("\n")
4794                 sys.stderr.write("".join(msg))
4795                 sys.stderr.flush()
4796
4797                 explanations_for_all = explanations == len(self._slot_collision_info)
4798
4799                 if explanations_for_all or "--quiet" in self.myopts:
4800                         return
4801
4802                 msg = []
4803                 msg.append("It may be possible to solve this problem ")
4804                 msg.append("by using package.mask to prevent one of ")
4805                 msg.append("those packages from being selected. ")
4806                 msg.append("However, it is also possible that conflicting ")
4807                 msg.append("dependencies exist such that they are impossible to ")
4808                 msg.append("satisfy simultaneously.  If such a conflict exists in ")
4809                 msg.append("the dependencies of two different packages, then those ")
4810                 msg.append("packages can not be installed simultaneously.")
4811
4812                 from formatter import AbstractFormatter, DumbWriter
4813                 f = AbstractFormatter(DumbWriter(sys.stderr, maxcol=72))
4814                 for x in msg:
4815                         f.add_flowing_data(x)
4816                 f.end_paragraph(1)
4817
4818                 msg = []
4819                 msg.append("For more information, see MASKED PACKAGES ")
4820                 msg.append("section in the emerge man page or refer ")
4821                 msg.append("to the Gentoo Handbook.")
4822                 for x in msg:
4823                         f.add_flowing_data(x)
4824                 f.end_paragraph(1)
4825                 f.writer.flush()
4826
4827         def _slot_conflict_explanation(self, slot_nodes):
4828                 """
4829                 When a slot conflict occurs due to USE deps, there are a few
4830                 different cases to consider:
4831
4832                 1) New USE are correctly set but --newuse wasn't requested so an
4833                    installed package with incorrect USE happened to get pulled
4834                    into graph before the new one.
4835
4836                 2) New USE are incorrectly set but an installed package has correct
4837                    USE so it got pulled into the graph, and a new instance also got
4838                    pulled in due to --newuse or an upgrade.
4839
4840                 3) Multiple USE deps exist that can't be satisfied simultaneously,
4841                    and multiple package instances got pulled into the same slot to
4842                    satisfy the conflicting deps.
4843
4844                 Currently, explanations and suggested courses of action are generated
4845                 for cases 1 and 2. Case 3 is too complex to give a useful suggestion.
4846                 """
4847
4848                 if len(slot_nodes) != 2:
4849                         # Suggestions are only implemented for
4850                         # conflicts between two packages.
4851                         return None
4852
4853                 all_conflict_atoms = self._slot_conflict_parent_atoms
4854                 matched_node = None
4855                 matched_atoms = None
4856                 unmatched_node = None
4857                 for node in slot_nodes:
4858                         parent_atoms = self._parent_atoms.get(node)
4859                         if not parent_atoms:
4860                                 # Normally, there are always parent atoms. If there are
4861                                 # none then something unexpected is happening and there's
4862                                 # currently no suggestion for this case.
4863                                 return None
4864                         conflict_atoms = all_conflict_atoms.intersection(parent_atoms)
4865                         for parent_atom in conflict_atoms:
4866                                 parent, atom = parent_atom
4867                                 if not atom.use:
4868                                         # Suggestions are currently only implemented for cases
4869                                         # in which all conflict atoms have USE deps.
4870                                         return None
4871                         if conflict_atoms:
4872                                 if matched_node is not None:
4873                                         # If conflict atoms match multiple nodes
4874                                         # then there's no suggestion.
4875                                         return None
4876                                 matched_node = node
4877                                 matched_atoms = conflict_atoms
4878                         else:
4879                                 if unmatched_node is not None:
4880                                         # Neither node is matched by conflict atoms, and
4881                                         # there is no suggestion for this case.
4882                                         return None
4883                                 unmatched_node = node
4884
4885                 if matched_node is None or unmatched_node is None:
4886                         # This shouldn't happen.
4887                         return None
4888
4889                 if unmatched_node.installed and not matched_node.installed and \
4890                         unmatched_node.cpv == matched_node.cpv:
4891                         # If the conflicting packages are the same version then
4892                         # --newuse should be all that's needed. If they are different
4893                         # versions then there's some other problem.
4894                         return "New USE are correctly set, but --newuse wasn't" + \
4895                                 " requested, so an installed package with incorrect USE " + \
4896                                 "happened to get pulled into the dependency graph. " + \
4897                                 "In order to solve " + \
4898                                 "this, either specify the --newuse option or explicitly " + \
4899                                 " reinstall '%s'." % matched_node.slot_atom
4900
4901                 if matched_node.installed and not unmatched_node.installed:
4902                         atoms = sorted(set(atom for parent, atom in matched_atoms))
4903                         explanation = ("New USE for '%s' are incorrectly set. " + \
4904                                 "In order to solve this, adjust USE to satisfy '%s'") % \
4905                                 (matched_node.slot_atom, atoms[0])
4906                         if len(atoms) > 1:
4907                                 for atom in atoms[1:-1]:
4908                                         explanation += ", '%s'" % (atom,)
4909                                 if len(atoms) > 2:
4910                                         explanation += ","
4911                                 explanation += " and '%s'" % (atoms[-1],)
4912                         explanation += "."
4913                         return explanation
4914
4915                 return None
4916
4917         def _process_slot_conflicts(self):
4918                 """
4919                 Process slot conflict data to identify specific atoms which
4920                 lead to conflict. These atoms only match a subset of the
4921                 packages that have been pulled into a given slot.
4922                 """
4923                 for (slot_atom, root), slot_nodes \
4924                         in self._slot_collision_info.iteritems():
4925
4926                         all_parent_atoms = set()
4927                         for pkg in slot_nodes:
4928                                 parent_atoms = self._parent_atoms.get(pkg)
4929                                 if not parent_atoms:
4930                                         continue
4931                                 all_parent_atoms.update(parent_atoms)
4932
4933                         for pkg in slot_nodes:
4934                                 parent_atoms = self._parent_atoms.get(pkg)
4935                                 if parent_atoms is None:
4936                                         parent_atoms = set()
4937                                         self._parent_atoms[pkg] = parent_atoms
4938                                 for parent_atom in all_parent_atoms:
4939                                         if parent_atom in parent_atoms:
4940                                                 continue
4941                                         # Use package set for matching since it will match via
4942                                         # PROVIDE when necessary, while match_from_list does not.
4943                                         parent, atom = parent_atom
4944                                         atom_set = InternalPackageSet(
4945                                                 initial_atoms=(atom,))
4946                                         if atom_set.findAtomForPackage(pkg):
4947                                                 parent_atoms.add(parent_atom)
4948                                         else:
4949                                                 self._slot_conflict_parent_atoms.add(parent_atom)
4950
4951         def _reinstall_for_flags(self, forced_flags,
4952                 orig_use, orig_iuse, cur_use, cur_iuse):
4953                 """Return a set of flags that trigger reinstallation, or None if there
4954                 are no such flags."""
4955                 if "--newuse" in self.myopts:
4956                         flags = set(orig_iuse.symmetric_difference(
4957                                 cur_iuse).difference(forced_flags))
4958                         flags.update(orig_iuse.intersection(orig_use).symmetric_difference(
4959                                 cur_iuse.intersection(cur_use)))
4960                         if flags:
4961                                 return flags
4962                 elif "changed-use" == self.myopts.get("--reinstall"):
4963                         flags = orig_iuse.intersection(orig_use).symmetric_difference(
4964                                 cur_iuse.intersection(cur_use))
4965                         if flags:
4966                                 return flags
4967                 return None
4968
4969         def _create_graph(self, allow_unsatisfied=False):
4970                 dep_stack = self._dep_stack
4971                 while dep_stack:
4972                         self.spinner.update()
4973                         dep = dep_stack.pop()
4974                         if isinstance(dep, Package):
4975                                 if not self._add_pkg_deps(dep,
4976                                         allow_unsatisfied=allow_unsatisfied):
4977                                         return 0
4978                                 continue
4979                         if not self._add_dep(dep, allow_unsatisfied=allow_unsatisfied):
4980                                 return 0
4981                 return 1
4982
4983         def _add_dep(self, dep, allow_unsatisfied=False):
4984                 debug = "--debug" in self.myopts
4985                 buildpkgonly = "--buildpkgonly" in self.myopts
4986                 nodeps = "--nodeps" in self.myopts
4987                 empty = "empty" in self.myparams
4988                 deep = "deep" in self.myparams
4989                 update = "--update" in self.myopts and dep.depth <= 1
4990                 if dep.blocker:
4991                         if not buildpkgonly and \
4992                                 not nodeps and \
4993                                 dep.parent not in self._slot_collision_nodes:
4994                                 if dep.parent.onlydeps:
4995                                         # It's safe to ignore blockers if the
4996                                         # parent is an --onlydeps node.
4997                                         return 1
4998                                 # The blocker applies to the root where
4999                                 # the parent is or will be installed.
5000                                 blocker = Blocker(atom=dep.atom,
5001                                         eapi=dep.parent.metadata["EAPI"],
5002                                         root=dep.parent.root)
5003                                 self._blocker_parents.add(blocker, dep.parent)
5004                         return 1
5005                 dep_pkg, existing_node = self._select_package(dep.root, dep.atom,
5006                         onlydeps=dep.onlydeps)
5007                 if not dep_pkg:
5008                         if dep.priority.optional:
5009                                 # This could be an unecessary build-time dep
5010                                 # pulled in by --with-bdeps=y.
5011                                 return 1
5012                         if allow_unsatisfied:
5013                                 self._unsatisfied_deps.append(dep)
5014                                 return 1
5015                         self._unsatisfied_deps_for_display.append(
5016                                 ((dep.root, dep.atom), {"myparent":dep.parent}))
5017                         return 0
5018                 # In some cases, dep_check will return deps that shouldn't
5019                 # be proccessed any further, so they are identified and
5020                 # discarded here. Try to discard as few as possible since
5021                 # discarded dependencies reduce the amount of information
5022                 # available for optimization of merge order.
5023                 if dep.priority.satisfied and \
5024                         not dep_pkg.installed and \
5025                         not (existing_node or empty or deep or update):
5026                         myarg = None
5027                         if dep.root == self.target_root:
5028                                 try:
5029                                         myarg = self._iter_atoms_for_pkg(dep_pkg).next()
5030                                 except StopIteration:
5031                                         pass
5032                                 except portage.exception.InvalidDependString:
5033                                         if not dep_pkg.installed:
5034                                                 # This shouldn't happen since the package
5035                                                 # should have been masked.
5036                                                 raise
5037                         if not myarg:
5038                                 self._ignored_deps.append(dep)
5039                                 return 1
5040
5041                 if not self._add_pkg(dep_pkg, dep):
5042                         return 0
5043                 return 1
5044
5045         def _add_pkg(self, pkg, dep):
5046                 myparent = None
5047                 priority = None
5048                 depth = 0
5049                 if dep is None:
5050                         dep = Dependency()
5051                 else:
5052                         myparent = dep.parent
5053                         priority = dep.priority
5054                         depth = dep.depth
5055                 if priority is None:
5056                         priority = DepPriority()
5057                 """
5058                 Fills the digraph with nodes comprised of packages to merge.
5059                 mybigkey is the package spec of the package to merge.
5060                 myparent is the package depending on mybigkey ( or None )
5061                 addme = Should we add this package to the digraph or are we just looking at it's deps?
5062                         Think --onlydeps, we need to ignore packages in that case.
5063                 #stuff to add:
5064                 #SLOT-aware emerge
5065                 #IUSE-aware emerge -> USE DEP aware depgraph
5066                 #"no downgrade" emerge
5067                 """
5068                 # Ensure that the dependencies of the same package
5069                 # are never processed more than once.
5070                 previously_added = pkg in self.digraph
5071
5072                 # select the correct /var database that we'll be checking against
5073                 vardbapi = self.trees[pkg.root]["vartree"].dbapi
5074                 pkgsettings = self.pkgsettings[pkg.root]
5075
5076                 arg_atoms = None
5077                 if True:
5078                         try:
5079                                 arg_atoms = list(self._iter_atoms_for_pkg(pkg))
5080                         except portage.exception.InvalidDependString, e:
5081                                 if not pkg.installed:
5082                                         show_invalid_depstring_notice(
5083                                                 pkg, pkg.metadata["PROVIDE"], str(e))
5084                                         return 0
5085                                 del e
5086
5087                 if not pkg.onlydeps:
5088                         if not pkg.installed and \
5089                                 "empty" not in self.myparams and \
5090                                 vardbapi.match(pkg.slot_atom):
5091                                 # Increase the priority of dependencies on packages that
5092                                 # are being rebuilt. This optimizes merge order so that
5093                                 # dependencies are rebuilt/updated as soon as possible,
5094                                 # which is needed especially when emerge is called by
5095                                 # revdep-rebuild since dependencies may be affected by ABI
5096                                 # breakage that has rendered them useless. Don't adjust
5097                                 # priority here when in "empty" mode since all packages
5098                                 # are being merged in that case.
5099                                 priority.rebuild = True
5100
5101                         existing_node = self._slot_pkg_map[pkg.root].get(pkg.slot_atom)
5102                         slot_collision = False
5103                         if existing_node:
5104                                 existing_node_matches = pkg.cpv == existing_node.cpv
5105                                 if existing_node_matches and \
5106                                         pkg != existing_node and \
5107                                         dep.atom is not None:
5108                                         # Use package set for matching since it will match via
5109                                         # PROVIDE when necessary, while match_from_list does not.
5110                                         atom_set = InternalPackageSet(initial_atoms=[dep.atom])
5111                                         if not atom_set.findAtomForPackage(existing_node):
5112                                                 existing_node_matches = False
5113                                 if existing_node_matches:
5114                                         # The existing node can be reused.
5115                                         if arg_atoms:
5116                                                 for parent_atom in arg_atoms:
5117                                                         parent, atom = parent_atom
5118                                                         self.digraph.add(existing_node, parent,
5119                                                                 priority=priority)
5120                                                         self._add_parent_atom(existing_node, parent_atom)
5121                                         # If a direct circular dependency is not an unsatisfied
5122                                         # buildtime dependency then drop it here since otherwise
5123                                         # it can skew the merge order calculation in an unwanted
5124                                         # way.
5125                                         if existing_node != myparent or \
5126                                                 (priority.buildtime and not priority.satisfied):
5127                                                 self.digraph.addnode(existing_node, myparent,
5128                                                         priority=priority)
5129                                                 if dep.atom is not None and dep.parent is not None:
5130                                                         self._add_parent_atom(existing_node,
5131                                                                 (dep.parent, dep.atom))
5132                                         return 1
5133                                 else:
5134
5135                                         # A slot collision has occurred.  Sometimes this coincides
5136                                         # with unresolvable blockers, so the slot collision will be
5137                                         # shown later if there are no unresolvable blockers.
5138                                         self._add_slot_conflict(pkg)
5139                                         slot_collision = True
5140
5141                         if slot_collision:
5142                                 # Now add this node to the graph so that self.display()
5143                                 # can show use flags and --tree portage.output.  This node is
5144                                 # only being partially added to the graph.  It must not be
5145                                 # allowed to interfere with the other nodes that have been
5146                                 # added.  Do not overwrite data for existing nodes in
5147                                 # self.mydbapi since that data will be used for blocker
5148                                 # validation.
5149                                 # Even though the graph is now invalid, continue to process
5150                                 # dependencies so that things like --fetchonly can still
5151                                 # function despite collisions.
5152                                 pass
5153                         elif not previously_added:
5154                                 self._slot_pkg_map[pkg.root][pkg.slot_atom] = pkg
5155                                 self.mydbapi[pkg.root].cpv_inject(pkg)
5156                                 self._filtered_trees[pkg.root]["porttree"].dbapi._clear_cache()
5157
5158                         if not pkg.installed:
5159                                 # Allow this package to satisfy old-style virtuals in case it
5160                                 # doesn't already. Any pre-existing providers will be preferred
5161                                 # over this one.
5162                                 try:
5163                                         pkgsettings.setinst(pkg.cpv, pkg.metadata)
5164                                         # For consistency, also update the global virtuals.
5165                                         settings = self.roots[pkg.root].settings
5166                                         settings.unlock()
5167                                         settings.setinst(pkg.cpv, pkg.metadata)
5168                                         settings.lock()
5169                                 except portage.exception.InvalidDependString, e:
5170                                         show_invalid_depstring_notice(
5171                                                 pkg, pkg.metadata["PROVIDE"], str(e))
5172                                         del e
5173                                         return 0
5174
5175                 if arg_atoms:
5176                         self._set_nodes.add(pkg)
5177
5178                 # Do this even when addme is False (--onlydeps) so that the
5179                 # parent/child relationship is always known in case
5180                 # self._show_slot_collision_notice() needs to be called later.
5181                 self.digraph.add(pkg, myparent, priority=priority)
5182                 if dep.atom is not None and dep.parent is not None:
5183                         self._add_parent_atom(pkg, (dep.parent, dep.atom))
5184
5185                 if arg_atoms:
5186                         for parent_atom in arg_atoms:
5187                                 parent, atom = parent_atom
5188                                 self.digraph.add(pkg, parent, priority=priority)
5189                                 self._add_parent_atom(pkg, parent_atom)
5190
5191                 """ This section determines whether we go deeper into dependencies or not.
5192                     We want to go deeper on a few occasions:
5193                     Installing package A, we need to make sure package A's deps are met.
5194                     emerge --deep <pkgspec>; we need to recursively check dependencies of pkgspec
5195                     If we are in --nodeps (no recursion) mode, we obviously only check 1 level of dependencies.
5196                 """
5197                 dep_stack = self._dep_stack
5198                 if "recurse" not in self.myparams:
5199                         return 1
5200                 elif pkg.installed and \
5201                         "deep" not in self.myparams:
5202                         dep_stack = self._ignored_deps
5203
5204                 self.spinner.update()
5205
5206                 if arg_atoms:
5207                         depth = 0
5208                 pkg.depth = depth
5209                 if not previously_added:
5210                         dep_stack.append(pkg)
5211                 return 1
5212
5213         def _add_parent_atom(self, pkg, parent_atom):
5214                 parent_atoms = self._parent_atoms.get(pkg)
5215                 if parent_atoms is None:
5216                         parent_atoms = set()
5217                         self._parent_atoms[pkg] = parent_atoms
5218                 parent_atoms.add(parent_atom)
5219
5220         def _add_slot_conflict(self, pkg):
5221                 self._slot_collision_nodes.add(pkg)
5222                 slot_key = (pkg.slot_atom, pkg.root)
5223                 slot_nodes = self._slot_collision_info.get(slot_key)
5224                 if slot_nodes is None:
5225                         slot_nodes = set()
5226                         slot_nodes.add(self._slot_pkg_map[pkg.root][pkg.slot_atom])
5227                         self._slot_collision_info[slot_key] = slot_nodes
5228                 slot_nodes.add(pkg)
5229
5230         def _add_pkg_deps(self, pkg, allow_unsatisfied=False):
5231
5232                 mytype = pkg.type_name
5233                 myroot = pkg.root
5234                 mykey = pkg.cpv
5235                 metadata = pkg.metadata
5236                 myuse = pkg.use.enabled
5237                 jbigkey = pkg
5238                 depth = pkg.depth + 1
5239                 removal_action = "remove" in self.myparams
5240
5241                 edepend={}
5242                 depkeys = ["DEPEND","RDEPEND","PDEPEND"]
5243                 for k in depkeys:
5244                         edepend[k] = metadata[k]
5245
5246                 if not pkg.built and \
5247                         "--buildpkgonly" in self.myopts and \
5248                         "deep" not in self.myparams and \
5249                         "empty" not in self.myparams:
5250                         edepend["RDEPEND"] = ""
5251                         edepend["PDEPEND"] = ""
5252                 bdeps_optional = False
5253
5254                 if pkg.built and not removal_action:
5255                         if self.myopts.get("--with-bdeps", "n") == "y":
5256                                 # Pull in build time deps as requested, but marked them as
5257                                 # "optional" since they are not strictly required. This allows
5258                                 # more freedom in the merge order calculation for solving
5259                                 # circular dependencies. Don't convert to PDEPEND since that
5260                                 # could make --with-bdeps=y less effective if it is used to
5261                                 # adjust merge order to prevent built_with_use() calls from
5262                                 # failing.
5263                                 bdeps_optional = True
5264                         else:
5265                                 # built packages do not have build time dependencies.
5266                                 edepend["DEPEND"] = ""
5267
5268                 if removal_action and self.myopts.get("--with-bdeps", "y") == "n":
5269                         edepend["DEPEND"] = ""
5270
5271                 deps = (
5272                         ("/", edepend["DEPEND"],
5273                                 self._priority(buildtime=(not bdeps_optional),
5274                                 optional=bdeps_optional)),
5275                         (myroot, edepend["RDEPEND"], self._priority(runtime=True)),
5276                         (myroot, edepend["PDEPEND"], self._priority(runtime_post=True))
5277                 )
5278
5279                 debug = "--debug" in self.myopts
5280                 strict = mytype != "installed"
5281                 try:
5282                         for dep_root, dep_string, dep_priority in deps:
5283                                 if not dep_string:
5284                                         continue
5285                                 if debug:
5286                                         print
5287                                         print "Parent:   ", jbigkey
5288                                         print "Depstring:", dep_string
5289                                         print "Priority:", dep_priority
5290                                 vardb = self.roots[dep_root].trees["vartree"].dbapi
5291                                 try:
5292                                         selected_atoms = self._select_atoms(dep_root,
5293                                                 dep_string, myuse=myuse, parent=pkg, strict=strict,
5294                                                 priority=dep_priority)
5295                                 except portage.exception.InvalidDependString, e:
5296                                         show_invalid_depstring_notice(jbigkey, dep_string, str(e))
5297                                         return 0
5298                                 if debug:
5299                                         print "Candidates:", selected_atoms
5300
5301                                 for atom in selected_atoms:
5302                                         try:
5303
5304                                                 atom = portage.dep.Atom(atom)
5305
5306                                                 mypriority = dep_priority.copy()
5307                                                 if not atom.blocker and vardb.match(atom):
5308                                                         mypriority.satisfied = True
5309
5310                                                 if not self._add_dep(Dependency(atom=atom,
5311                                                         blocker=atom.blocker, depth=depth, parent=pkg,
5312                                                         priority=mypriority, root=dep_root),
5313                                                         allow_unsatisfied=allow_unsatisfied):
5314                                                         return 0
5315
5316                                         except portage.exception.InvalidAtom, e:
5317                                                 show_invalid_depstring_notice(
5318                                                         pkg, dep_string, str(e))
5319                                                 del e
5320                                                 if not pkg.installed:
5321                                                         return 0
5322
5323                                 if debug:
5324                                         print "Exiting...", jbigkey
5325                 except portage.exception.AmbiguousPackageName, e:
5326                         pkgs = e.args[0]
5327                         portage.writemsg("\n\n!!! An atom in the dependencies " + \
5328                                 "is not fully-qualified. Multiple matches:\n\n", noiselevel=-1)
5329                         for cpv in pkgs:
5330                                 portage.writemsg("    %s\n" % cpv, noiselevel=-1)
5331                         portage.writemsg("\n", noiselevel=-1)
5332                         if mytype == "binary":
5333                                 portage.writemsg(
5334                                         "!!! This binary package cannot be installed: '%s'\n" % \
5335                                         mykey, noiselevel=-1)
5336                         elif mytype == "ebuild":
5337                                 portdb = self.roots[myroot].trees["porttree"].dbapi
5338                                 myebuild, mylocation = portdb.findname2(mykey)
5339                                 portage.writemsg("!!! This ebuild cannot be installed: " + \
5340                                         "'%s'\n" % myebuild, noiselevel=-1)
5341                         portage.writemsg("!!! Please notify the package maintainer " + \
5342                                 "that atoms must be fully-qualified.\n", noiselevel=-1)
5343                         return 0
5344                 return 1
5345
5346         def _priority(self, **kwargs):
5347                 if "remove" in self.myparams:
5348                         priority_constructor = UnmergeDepPriority
5349                 else:
5350                         priority_constructor = DepPriority
5351                 return priority_constructor(**kwargs)
5352
5353         def _dep_expand(self, root_config, atom_without_category):
5354                 """
5355                 @param root_config: a root config instance
5356                 @type root_config: RootConfig
5357                 @param atom_without_category: an atom without a category component
5358                 @type atom_without_category: String
5359                 @rtype: list
5360                 @returns: a list of atoms containing categories (possibly empty)
5361                 """
5362                 null_cp = portage.dep_getkey(insert_category_into_atom(
5363                         atom_without_category, "null"))
5364                 cat, atom_pn = portage.catsplit(null_cp)
5365
5366                 dbs = self._filtered_trees[root_config.root]["dbs"]
5367                 categories = set()
5368                 for db, pkg_type, built, installed, db_keys in dbs:
5369                         for cat in db.categories:
5370                                 if db.cp_list("%s/%s" % (cat, atom_pn)):
5371                                         categories.add(cat)
5372
5373                 deps = []
5374                 for cat in categories:
5375                         deps.append(insert_category_into_atom(
5376                                 atom_without_category, cat))
5377                 return deps
5378
5379         def _have_new_virt(self, root, atom_cp):
5380                 ret = False
5381                 for db, pkg_type, built, installed, db_keys in \
5382                         self._filtered_trees[root]["dbs"]:
5383                         if db.cp_list(atom_cp):
5384                                 ret = True
5385                                 break
5386                 return ret
5387
5388         def _iter_atoms_for_pkg(self, pkg):
5389                 # TODO: add multiple $ROOT support
5390                 if pkg.root != self.target_root:
5391                         return
5392                 atom_arg_map = self._atom_arg_map
5393                 root_config = self.roots[pkg.root]
5394                 for atom in self._set_atoms.iterAtomsForPackage(pkg):
5395                         atom_cp = portage.dep_getkey(atom)
5396                         if atom_cp != pkg.cp and \
5397                                 self._have_new_virt(pkg.root, atom_cp):
5398                                 continue
5399                         visible_pkgs = root_config.visible_pkgs.match_pkgs(atom)
5400                         visible_pkgs.reverse() # descending order
5401                         higher_slot = None
5402                         for visible_pkg in visible_pkgs:
5403                                 if visible_pkg.cp != atom_cp:
5404                                         continue
5405                                 if pkg >= visible_pkg:
5406                                         # This is descending order, and we're not
5407                                         # interested in any versions <= pkg given.
5408                                         break
5409                                 if pkg.slot_atom != visible_pkg.slot_atom:
5410                                         higher_slot = visible_pkg
5411                                         break
5412                         if higher_slot is not None:
5413                                 continue
5414                         for arg in atom_arg_map[(atom, pkg.root)]:
5415                                 if isinstance(arg, PackageArg) and \
5416                                         arg.package != pkg:
5417                                         continue
5418                                 yield arg, atom
5419
5420         def select_files(self, myfiles):
5421                 """Given a list of .tbz2s, .ebuilds sets, and deps, create the
5422                 appropriate depgraph and return a favorite list."""
5423                 debug = "--debug" in self.myopts
5424                 root_config = self.roots[self.target_root]
5425                 sets = root_config.sets
5426                 getSetAtoms = root_config.setconfig.getSetAtoms
5427                 myfavorites=[]
5428                 myroot = self.target_root
5429                 dbs = self._filtered_trees[myroot]["dbs"]
5430                 vardb = self.trees[myroot]["vartree"].dbapi
5431                 real_vardb = self._trees_orig[myroot]["vartree"].dbapi
5432                 portdb = self.trees[myroot]["porttree"].dbapi
5433                 bindb = self.trees[myroot]["bintree"].dbapi
5434                 pkgsettings = self.pkgsettings[myroot]
5435                 args = []
5436                 onlydeps = "--onlydeps" in self.myopts
5437                 lookup_owners = []
5438                 for x in myfiles:
5439                         ext = os.path.splitext(x)[1]
5440                         if ext==".tbz2":
5441                                 if not os.path.exists(x):
5442                                         if os.path.exists(
5443                                                 os.path.join(pkgsettings["PKGDIR"], "All", x)):
5444                                                 x = os.path.join(pkgsettings["PKGDIR"], "All", x)
5445                                         elif os.path.exists(
5446                                                 os.path.join(pkgsettings["PKGDIR"], x)):
5447                                                 x = os.path.join(pkgsettings["PKGDIR"], x)
5448                                         else:
5449                                                 print "\n\n!!! Binary package '"+str(x)+"' does not exist."
5450                                                 print "!!! Please ensure the tbz2 exists as specified.\n"
5451                                                 return 0, myfavorites
5452                                 mytbz2=portage.xpak.tbz2(x)
5453                                 mykey=mytbz2.getelements("CATEGORY")[0]+"/"+os.path.splitext(os.path.basename(x))[0]
5454                                 if os.path.realpath(x) != \
5455                                         os.path.realpath(self.trees[myroot]["bintree"].getname(mykey)):
5456                                         print colorize("BAD", "\n*** You need to adjust PKGDIR to emerge this package.\n")
5457                                         return 0, myfavorites
5458                                 db_keys = list(bindb._aux_cache_keys)
5459                                 metadata = izip(db_keys, bindb.aux_get(mykey, db_keys))
5460                                 pkg = Package(type_name="binary", root_config=root_config,
5461                                         cpv=mykey, built=True, metadata=metadata,
5462                                         onlydeps=onlydeps)
5463                                 self._pkg_cache[pkg] = pkg
5464                                 args.append(PackageArg(arg=x, package=pkg,
5465                                         root_config=root_config))
5466                         elif ext==".ebuild":
5467                                 ebuild_path = portage.util.normalize_path(os.path.abspath(x))
5468                                 pkgdir = os.path.dirname(ebuild_path)
5469                                 tree_root = os.path.dirname(os.path.dirname(pkgdir))
5470                                 cp = pkgdir[len(tree_root)+1:]
5471                                 e = portage.exception.PackageNotFound(
5472                                         ("%s is not in a valid portage tree " + \
5473                                         "hierarchy or does not exist") % x)
5474                                 if not portage.isvalidatom(cp):
5475                                         raise e
5476                                 cat = portage.catsplit(cp)[0]
5477                                 mykey = cat + "/" + os.path.basename(ebuild_path[:-7])
5478                                 if not portage.isvalidatom("="+mykey):
5479                                         raise e
5480                                 ebuild_path = portdb.findname(mykey)
5481                                 if ebuild_path:
5482                                         if ebuild_path != os.path.join(os.path.realpath(tree_root),
5483                                                 cp, os.path.basename(ebuild_path)):
5484                                                 print colorize("BAD", "\n*** You need to adjust PORTDIR or PORTDIR_OVERLAY to emerge this package.\n")
5485                                                 return 0, myfavorites
5486                                         if mykey not in portdb.xmatch(
5487                                                 "match-visible", portage.dep_getkey(mykey)):
5488                                                 print colorize("BAD", "\n*** You are emerging a masked package. It is MUCH better to use")
5489                                                 print colorize("BAD", "*** /etc/portage/package.* to accomplish this. See portage(5) man")
5490                                                 print colorize("BAD", "*** page for details.")
5491                                                 countdown(int(self.settings["EMERGE_WARNING_DELAY"]),
5492                                                         "Continuing...")
5493                                 else:
5494                                         raise portage.exception.PackageNotFound(
5495                                                 "%s is not in a valid portage tree hierarchy or does not exist" % x)
5496                                 db_keys = list(portdb._aux_cache_keys)
5497                                 metadata = izip(db_keys, portdb.aux_get(mykey, db_keys))
5498                                 pkg = Package(type_name="ebuild", root_config=root_config,
5499                                         cpv=mykey, metadata=metadata, onlydeps=onlydeps)
5500                                 pkgsettings.setcpv(pkg)
5501                                 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
5502                                 pkg.metadata['CHOST'] = pkgsettings.get('CHOST', '')
5503                                 self._pkg_cache[pkg] = pkg
5504                                 args.append(PackageArg(arg=x, package=pkg,
5505                                         root_config=root_config))
5506                         elif x.startswith(os.path.sep):
5507                                 if not x.startswith(myroot):
5508                                         portage.writemsg(("\n\n!!! '%s' does not start with" + \
5509                                                 " $ROOT.\n") % x, noiselevel=-1)
5510                                         return 0, []
5511                                 # Queue these up since it's most efficient to handle
5512                                 # multiple files in a single iter_owners() call.
5513                                 lookup_owners.append(x)
5514                         else:
5515                                 if x in ("system", "world"):
5516                                         x = SETPREFIX + x
5517                                 if x.startswith(SETPREFIX):
5518                                         s = x[len(SETPREFIX):]
5519                                         if s not in sets:
5520                                                 raise portage.exception.PackageSetNotFound(s)
5521                                         if s in self._sets:
5522                                                 continue
5523                                         # Recursively expand sets so that containment tests in
5524                                         # self._get_parent_sets() properly match atoms in nested
5525                                         # sets (like if world contains system).
5526                                         expanded_set = InternalPackageSet(
5527                                                 initial_atoms=getSetAtoms(s))
5528                                         self._sets[s] = expanded_set
5529                                         args.append(SetArg(arg=x, set=expanded_set,
5530                                                 root_config=root_config))
5531                                         continue
5532                                 if not is_valid_package_atom(x):
5533                                         portage.writemsg("\n\n!!! '%s' is not a valid package atom.\n" % x,
5534                                                 noiselevel=-1)
5535                                         portage.writemsg("!!! Please check ebuild(5) for full details.\n")
5536                                         portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
5537                                         return (0,[])
5538                                 # Don't expand categories or old-style virtuals here unless
5539                                 # necessary. Expansion of old-style virtuals here causes at
5540                                 # least the following problems:
5541                                 #   1) It's more difficult to determine which set(s) an atom
5542                                 #      came from, if any.
5543                                 #   2) It takes away freedom from the resolver to choose other
5544                                 #      possible expansions when necessary.
5545                                 if "/" in x:
5546                                         args.append(AtomArg(arg=x, atom=x,
5547                                                 root_config=root_config))
5548                                         continue
5549                                 expanded_atoms = self._dep_expand(root_config, x)
5550                                 installed_cp_set = set()
5551                                 for atom in expanded_atoms:
5552                                         atom_cp = portage.dep_getkey(atom)
5553                                         if vardb.cp_list(atom_cp):
5554                                                 installed_cp_set.add(atom_cp)
5555                                 if len(expanded_atoms) > 1 and len(installed_cp_set) == 1:
5556                                         installed_cp = iter(installed_cp_set).next()
5557                                         expanded_atoms = [atom for atom in expanded_atoms \
5558                                                 if portage.dep_getkey(atom) == installed_cp]
5559
5560                                 if len(expanded_atoms) > 1:
5561                                         print
5562                                         print
5563                                         ambiguous_package_name(x, expanded_atoms, root_config,
5564                                                 self.spinner, self.myopts)
5565                                         return False, myfavorites
5566                                 if expanded_atoms:
5567                                         atom = expanded_atoms[0]
5568                                 else:
5569                                         null_atom = insert_category_into_atom(x, "null")
5570                                         null_cp = portage.dep_getkey(null_atom)
5571                                         cat, atom_pn = portage.catsplit(null_cp)
5572                                         virts_p = root_config.settings.get_virts_p().get(atom_pn)
5573                                         if virts_p:
5574                                                 # Allow the depgraph to choose which virtual.
5575                                                 atom = insert_category_into_atom(x, "virtual")
5576                                         else:
5577                                                 atom = insert_category_into_atom(x, "null")
5578
5579                                 args.append(AtomArg(arg=x, atom=atom,
5580                                         root_config=root_config))
5581
5582                 if lookup_owners:
5583                         relative_paths = []
5584                         search_for_multiple = False
5585                         if len(lookup_owners) > 1:
5586                                 search_for_multiple = True
5587
5588                         for x in lookup_owners:
5589                                 if not search_for_multiple and os.path.isdir(x):
5590                                         search_for_multiple = True
5591                                 relative_paths.append(x[len(myroot):])
5592
5593                         owners = set()
5594                         for pkg, relative_path in \
5595                                 real_vardb._owners.iter_owners(relative_paths):
5596                                 owners.add(pkg.mycpv)
5597                                 if not search_for_multiple:
5598                                         break
5599
5600                         if not owners:
5601                                 portage.writemsg(("\n\n!!! '%s' is not claimed " + \
5602                                         "by any package.\n") % lookup_owners[0], noiselevel=-1)
5603                                 return 0, []
5604
5605                         for cpv in owners:
5606                                 slot = vardb.aux_get(cpv, ["SLOT"])[0]
5607                                 if not slot:
5608                                         # portage now masks packages with missing slot, but it's
5609                                         # possible that one was installed by an older version
5610                                         atom = portage.cpv_getkey(cpv)
5611                                 else:
5612                                         atom = "%s:%s" % (portage.cpv_getkey(cpv), slot)
5613                                 args.append(AtomArg(arg=atom, atom=atom,
5614                                         root_config=root_config))
5615
5616                 if "--update" in self.myopts:
5617                         # In some cases, the greedy slots behavior can pull in a slot that
5618                         # the user would want to uninstall due to it being blocked by a
5619                         # newer version in a different slot. Therefore, it's necessary to
5620                         # detect and discard any that should be uninstalled. Each time
5621                         # that arguments are updated, package selections are repeated in
5622                         # order to ensure consistency with the current arguments:
5623                         #
5624                         #  1) Initialize args
5625                         #  2) Select packages and generate initial greedy atoms
5626                         #  3) Update args with greedy atoms
5627                         #  4) Select packages and generate greedy atoms again, while
5628                         #     accounting for any blockers between selected packages
5629                         #  5) Update args with revised greedy atoms
5630
5631                         self._set_args(args)
5632                         greedy_args = []
5633                         for arg in args:
5634                                 greedy_args.append(arg)
5635                                 if not isinstance(arg, AtomArg):
5636                                         continue
5637                                 for atom in self._greedy_slots(arg.root_config, arg.atom):
5638                                         greedy_args.append(
5639                                                 AtomArg(arg=arg.arg, atom=atom,
5640                                                         root_config=arg.root_config))
5641
5642                         self._set_args(greedy_args)
5643                         del greedy_args
5644
5645                         # Revise greedy atoms, accounting for any blockers
5646                         # between selected packages.
5647                         revised_greedy_args = []
5648                         for arg in args:
5649                                 revised_greedy_args.append(arg)
5650                                 if not isinstance(arg, AtomArg):
5651                                         continue
5652                                 for atom in self._greedy_slots(arg.root_config, arg.atom,
5653                                         blocker_lookahead=True):
5654                                         revised_greedy_args.append(
5655                                                 AtomArg(arg=arg.arg, atom=atom,
5656                                                         root_config=arg.root_config))
5657                         args = revised_greedy_args
5658                         del revised_greedy_args
5659
5660                 self._set_args(args)
5661
5662                 myfavorites = set(myfavorites)
5663                 for arg in args:
5664                         if isinstance(arg, (AtomArg, PackageArg)):
5665                                 myfavorites.add(arg.atom)
5666                         elif isinstance(arg, SetArg):
5667                                 myfavorites.add(arg.arg)
5668                 myfavorites = list(myfavorites)
5669
5670                 pprovideddict = pkgsettings.pprovideddict
5671                 if debug:
5672                         portage.writemsg("\n", noiselevel=-1)
5673                 # Order needs to be preserved since a feature of --nodeps
5674                 # is to allow the user to force a specific merge order.
5675                 args.reverse()
5676                 while args:
5677                         arg = args.pop()
5678                         for atom in arg.set:
5679                                 self.spinner.update()
5680                                 dep = Dependency(atom=atom, onlydeps=onlydeps,
5681                                         root=myroot, parent=arg)
5682                                 atom_cp = portage.dep_getkey(atom)
5683                                 try:
5684                                         pprovided = pprovideddict.get(portage.dep_getkey(atom))
5685                                         if pprovided and portage.match_from_list(atom, pprovided):
5686                                                 # A provided package has been specified on the command line.
5687                                                 self._pprovided_args.append((arg, atom))
5688                                                 continue
5689                                         if isinstance(arg, PackageArg):
5690                                                 if not self._add_pkg(arg.package, dep) or \
5691                                                         not self._create_graph():
5692                                                         sys.stderr.write(("\n\n!!! Problem resolving " + \
5693                                                                 "dependencies for %s\n") % arg.arg)
5694                                                         return 0, myfavorites
5695                                                 continue
5696                                         if debug:
5697                                                 portage.writemsg("      Arg: %s\n     Atom: %s\n" % \
5698                                                         (arg, atom), noiselevel=-1)
5699                                         pkg, existing_node = self._select_package(
5700                                                 myroot, atom, onlydeps=onlydeps)
5701                                         if not pkg:
5702                                                 if not (isinstance(arg, SetArg) and \
5703                                                         arg.name in ("system", "world")):
5704                                                         self._unsatisfied_deps_for_display.append(
5705                                                                 ((myroot, atom), {}))
5706                                                         return 0, myfavorites
5707                                                 self._missing_args.append((arg, atom))
5708                                                 continue
5709                                         if atom_cp != pkg.cp:
5710                                                 # For old-style virtuals, we need to repeat the
5711                                                 # package.provided check against the selected package.
5712                                                 expanded_atom = atom.replace(atom_cp, pkg.cp)
5713                                                 pprovided = pprovideddict.get(pkg.cp)
5714                                                 if pprovided and \
5715                                                         portage.match_from_list(expanded_atom, pprovided):
5716                                                         # A provided package has been
5717                                                         # specified on the command line.
5718                                                         self._pprovided_args.append((arg, atom))
5719                                                         continue
5720                                         if pkg.installed and "selective" not in self.myparams:
5721                                                 self._unsatisfied_deps_for_display.append(
5722                                                         ((myroot, atom), {}))
5723                                                 # Previous behavior was to bail out in this case, but
5724                                                 # since the dep is satisfied by the installed package,
5725                                                 # it's more friendly to continue building the graph
5726                                                 # and just show a warning message. Therefore, only bail
5727                                                 # out here if the atom is not from either the system or
5728                                                 # world set.
5729                                                 if not (isinstance(arg, SetArg) and \
5730                                                         arg.name in ("system", "world")):
5731                                                         return 0, myfavorites
5732
5733                                         # Add the selected package to the graph as soon as possible
5734                                         # so that later dep_check() calls can use it as feedback
5735                                         # for making more consistent atom selections.
5736                                         if not self._add_pkg(pkg, dep):
5737                                                 if isinstance(arg, SetArg):
5738                                                         sys.stderr.write(("\n\n!!! Problem resolving " + \
5739                                                                 "dependencies for %s from %s\n") % \
5740                                                                 (atom, arg.arg))
5741                                                 else:
5742                                                         sys.stderr.write(("\n\n!!! Problem resolving " + \
5743                                                                 "dependencies for %s\n") % atom)
5744                                                 return 0, myfavorites
5745
5746                                 except portage.exception.MissingSignature, e:
5747                                         portage.writemsg("\n\n!!! A missing gpg signature is preventing portage from calculating the\n")
5748                                         portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
5749                                         portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
5750                                         portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
5751                                         portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
5752                                         return 0, myfavorites
5753                                 except portage.exception.InvalidSignature, e:
5754                                         portage.writemsg("\n\n!!! An invalid gpg signature is preventing portage from calculating the\n")
5755                                         portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
5756                                         portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
5757                                         portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
5758                                         portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
5759                                         return 0, myfavorites
5760                                 except SystemExit, e:
5761                                         raise # Needed else can't exit
5762                                 except Exception, e:
5763                                         print >> sys.stderr, "\n\n!!! Problem in '%s' dependencies." % atom
5764                                         print >> sys.stderr, "!!!", str(e), getattr(e, "__module__", None)
5765                                         raise
5766
5767                 # Now that the root packages have been added to the graph,
5768                 # process the dependencies.
5769                 if not self._create_graph():
5770                         return 0, myfavorites
5771
5772                 missing=0
5773                 if "--usepkgonly" in self.myopts:
5774                         for xs in self.digraph.all_nodes():
5775                                 if not isinstance(xs, Package):
5776                                         continue
5777                                 if len(xs) >= 4 and xs[0] != "binary" and xs[3] == "merge":
5778                                         if missing == 0:
5779                                                 print
5780                                         missing += 1
5781                                         print "Missing binary for:",xs[2]
5782
5783                 try:
5784                         self.altlist()
5785                 except self._unknown_internal_error:
5786                         return False, myfavorites
5787
5788                 # We're true here unless we are missing binaries.
5789                 return (not missing,myfavorites)
5790
5791         def _set_args(self, args):
5792                 """
5793                 Create the "args" package set from atoms and packages given as
5794                 arguments. This method can be called multiple times if necessary.
5795                 The package selection cache is automatically invalidated, since
5796                 arguments influence package selections.
5797                 """
5798                 args_set = self._sets["args"]
5799                 args_set.clear()
5800                 for arg in args:
5801                         if not isinstance(arg, (AtomArg, PackageArg)):
5802                                 continue
5803                         atom = arg.atom
5804                         if atom in args_set:
5805                                 continue
5806                         args_set.add(atom)
5807
5808                 self._set_atoms.clear()
5809                 self._set_atoms.update(chain(*self._sets.itervalues()))
5810                 atom_arg_map = self._atom_arg_map
5811                 atom_arg_map.clear()
5812                 for arg in args:
5813                         for atom in arg.set:
5814                                 atom_key = (atom, arg.root_config.root)
5815                                 refs = atom_arg_map.get(atom_key)
5816                                 if refs is None:
5817                                         refs = []
5818                                         atom_arg_map[atom_key] = refs
5819                                         if arg not in refs:
5820                                                 refs.append(arg)
5821
5822                 # Invalidate the package selection cache, since
5823                 # arguments influence package selections.
5824                 self._highest_pkg_cache.clear()
5825                 for trees in self._filtered_trees.itervalues():
5826                         trees["porttree"].dbapi._clear_cache()
5827
5828         def _greedy_slots(self, root_config, atom, blocker_lookahead=False):
5829                 """
5830                 Return a list of slot atoms corresponding to installed slots that
5831                 differ from the slot of the highest visible match. When
5832                 blocker_lookahead is True, slot atoms that would trigger a blocker
5833                 conflict are automatically discarded, potentially allowing automatic
5834                 uninstallation of older slots when appropriate.
5835                 """
5836                 highest_pkg, in_graph = self._select_package(root_config.root, atom)
5837                 if highest_pkg is None:
5838                         return []
5839                 vardb = root_config.trees["vartree"].dbapi
5840                 slots = set()
5841                 for cpv in vardb.match(atom):
5842                         # don't mix new virtuals with old virtuals
5843                         if portage.cpv_getkey(cpv) == highest_pkg.cp:
5844                                 slots.add(vardb.aux_get(cpv, ["SLOT"])[0])
5845
5846                 slots.add(highest_pkg.metadata["SLOT"])
5847                 if len(slots) == 1:
5848                         return []
5849                 greedy_pkgs = []
5850                 slots.remove(highest_pkg.metadata["SLOT"])
5851                 while slots:
5852                         slot = slots.pop()
5853                         slot_atom = portage.dep.Atom("%s:%s" % (highest_pkg.cp, slot))
5854                         pkg, in_graph = self._select_package(root_config.root, slot_atom)
5855                         if pkg is not None and \
5856                                 pkg.cp == highest_pkg.cp and pkg < highest_pkg:
5857                                 greedy_pkgs.append(pkg)
5858                 if not greedy_pkgs:
5859                         return []
5860                 if not blocker_lookahead:
5861                         return [pkg.slot_atom for pkg in greedy_pkgs]
5862
5863                 blockers = {}
5864                 blocker_dep_keys = ["DEPEND", "PDEPEND", "RDEPEND"]
5865                 for pkg in greedy_pkgs + [highest_pkg]:
5866                         dep_str = " ".join(pkg.metadata[k] for k in blocker_dep_keys)
5867                         try:
5868                                 atoms = self._select_atoms(
5869                                         pkg.root, dep_str, pkg.use.enabled,
5870                                         parent=pkg, strict=True)
5871                         except portage.exception.InvalidDependString:
5872                                 continue
5873                         blocker_atoms = (x for x in atoms if x.blocker)
5874                         blockers[pkg] = InternalPackageSet(initial_atoms=blocker_atoms)
5875
5876                 if highest_pkg not in blockers:
5877                         return []
5878
5879                 # filter packages with invalid deps
5880                 greedy_pkgs = [pkg for pkg in greedy_pkgs if pkg in blockers]
5881
5882                 # filter packages that conflict with highest_pkg
5883                 greedy_pkgs = [pkg for pkg in greedy_pkgs if not \
5884                         (blockers[highest_pkg].findAtomForPackage(pkg) or \
5885                         blockers[pkg].findAtomForPackage(highest_pkg))]
5886
5887                 if not greedy_pkgs:
5888                         return []
5889
5890                 # If two packages conflict, discard the lower version.
5891                 discard_pkgs = set()
5892                 greedy_pkgs.sort(reverse=True)
5893                 for i in xrange(len(greedy_pkgs) - 1):
5894                         pkg1 = greedy_pkgs[i]
5895                         if pkg1 in discard_pkgs:
5896                                 continue
5897                         for j in xrange(i + 1, len(greedy_pkgs)):
5898                                 pkg2 = greedy_pkgs[j]
5899                                 if pkg2 in discard_pkgs:
5900                                         continue
5901                                 if blockers[pkg1].findAtomForPackage(pkg2) or \
5902                                         blockers[pkg2].findAtomForPackage(pkg1):
5903                                         # pkg1 > pkg2
5904                                         discard_pkgs.add(pkg2)
5905
5906                 return [pkg.slot_atom for pkg in greedy_pkgs \
5907                         if pkg not in discard_pkgs]
5908
5909         def _select_atoms_from_graph(self, *pargs, **kwargs):
5910                 """
5911                 Prefer atoms matching packages that have already been
5912                 added to the graph or those that are installed and have
5913                 not been scheduled for replacement.
5914                 """
5915                 kwargs["trees"] = self._graph_trees
5916                 return self._select_atoms_highest_available(*pargs, **kwargs)
5917
5918         def _select_atoms_highest_available(self, root, depstring,
5919                 myuse=None, parent=None, strict=True, trees=None, priority=None):
5920                 """This will raise InvalidDependString if necessary. If trees is
5921                 None then self._filtered_trees is used."""
5922                 pkgsettings = self.pkgsettings[root]
5923                 if trees is None:
5924                         trees = self._filtered_trees
5925                 if not getattr(priority, "buildtime", False):
5926                         # The parent should only be passed to dep_check() for buildtime
5927                         # dependencies since that's the only case when it's appropriate
5928                         # to trigger the circular dependency avoidance code which uses it.
5929                         # It's important not to trigger the same circular dependency
5930                         # avoidance code for runtime dependencies since it's not needed
5931                         # and it can promote an incorrect package choice.
5932                         parent = None
5933                 if True:
5934                         try:
5935                                 if parent is not None:
5936                                         trees[root]["parent"] = parent
5937                                 if not strict:
5938                                         portage.dep._dep_check_strict = False
5939                                 mycheck = portage.dep_check(depstring, None,
5940                                         pkgsettings, myuse=myuse,
5941                                         myroot=root, trees=trees)
5942                         finally:
5943                                 if parent is not None:
5944                                         trees[root].pop("parent")
5945                                 portage.dep._dep_check_strict = True
5946                         if not mycheck[0]:
5947                                 raise portage.exception.InvalidDependString(mycheck[1])
5948                         selected_atoms = mycheck[1]
5949                 return selected_atoms
5950
5951         def _show_unsatisfied_dep(self, root, atom, myparent=None, arg=None):
5952                 atom = portage.dep.Atom(atom)
5953                 atom_set = InternalPackageSet(initial_atoms=(atom,))
5954                 atom_without_use = atom
5955                 if atom.use:
5956                         atom_without_use = portage.dep.remove_slot(atom)
5957                         if atom.slot:
5958                                 atom_without_use += ":" + atom.slot
5959                         atom_without_use = portage.dep.Atom(atom_without_use)
5960                 xinfo = '"%s"' % atom
5961                 if arg:
5962                         xinfo='"%s"' % arg
5963                 # Discard null/ from failed cpv_expand category expansion.
5964                 xinfo = xinfo.replace("null/", "")
5965                 masked_packages = []
5966                 missing_use = []
5967                 masked_pkg_instances = set()
5968                 missing_licenses = []
5969                 have_eapi_mask = False
5970                 pkgsettings = self.pkgsettings[root]
5971                 implicit_iuse = pkgsettings._get_implicit_iuse()
5972                 root_config = self.roots[root]
5973                 portdb = self.roots[root].trees["porttree"].dbapi
5974                 dbs = self._filtered_trees[root]["dbs"]
5975                 for db, pkg_type, built, installed, db_keys in dbs:
5976                         if installed:
5977                                 continue
5978                         match = db.match
5979                         if hasattr(db, "xmatch"):
5980                                 cpv_list = db.xmatch("match-all", atom_without_use)
5981                         else:
5982                                 cpv_list = db.match(atom_without_use)
5983                         # descending order
5984                         cpv_list.reverse()
5985                         for cpv in cpv_list:
5986                                 metadata, mreasons  = get_mask_info(root_config, cpv,
5987                                         pkgsettings, db, pkg_type, built, installed, db_keys)
5988                                 if metadata is not None:
5989                                         pkg = Package(built=built, cpv=cpv,
5990                                                 installed=installed, metadata=metadata,
5991                                                 root_config=root_config)
5992                                         if pkg.cp != atom.cp:
5993                                                 # A cpv can be returned from dbapi.match() as an
5994                                                 # old-style virtual match even in cases when the
5995                                                 # package does not actually PROVIDE the virtual.
5996                                                 # Filter out any such false matches here.
5997                                                 if not atom_set.findAtomForPackage(pkg):
5998                                                         continue
5999                                         if mreasons:
6000                                                 masked_pkg_instances.add(pkg)
6001                                         if atom.use:
6002                                                 missing_use.append(pkg)
6003                                                 if not mreasons:
6004                                                         continue
6005                                 masked_packages.append(
6006                                         (root_config, pkgsettings, cpv, metadata, mreasons))
6007
6008                 missing_use_reasons = []
6009                 missing_iuse_reasons = []
6010                 for pkg in missing_use:
6011                         use = pkg.use.enabled
6012                         iuse = implicit_iuse.union(re.escape(x) for x in pkg.iuse.all)
6013                         iuse_re = re.compile("^(%s)$" % "|".join(iuse))
6014                         missing_iuse = []
6015                         for x in atom.use.required:
6016                                 if iuse_re.match(x) is None:
6017                                         missing_iuse.append(x)
6018                         mreasons = []
6019                         if missing_iuse:
6020                                 mreasons.append("Missing IUSE: %s" % " ".join(missing_iuse))
6021                                 missing_iuse_reasons.append((pkg, mreasons))
6022                         else:
6023                                 need_enable = sorted(atom.use.enabled.difference(use))
6024                                 need_disable = sorted(atom.use.disabled.intersection(use))
6025                                 if need_enable or need_disable:
6026                                         changes = []
6027                                         changes.extend(colorize("red", "+" + x) \
6028                                                 for x in need_enable)
6029                                         changes.extend(colorize("blue", "-" + x) \
6030                                                 for x in need_disable)
6031                                         mreasons.append("Change USE: %s" % " ".join(changes))
6032                                         missing_use_reasons.append((pkg, mreasons))
6033
6034                 unmasked_use_reasons = [(pkg, mreasons) for (pkg, mreasons) \
6035                         in missing_use_reasons if pkg not in masked_pkg_instances]
6036
6037                 unmasked_iuse_reasons = [(pkg, mreasons) for (pkg, mreasons) \
6038                         in missing_iuse_reasons if pkg not in masked_pkg_instances]
6039
6040                 show_missing_use = False
6041                 if unmasked_use_reasons:
6042                         # Only show the latest version.
6043                         show_missing_use = unmasked_use_reasons[:1]
6044                 elif unmasked_iuse_reasons:
6045                         if missing_use_reasons:
6046                                 # All packages with required IUSE are masked,
6047                                 # so display a normal masking message.
6048                                 pass
6049                         else:
6050                                 show_missing_use = unmasked_iuse_reasons
6051
6052                 if show_missing_use:
6053                         print "\nemerge: there are no ebuilds built with USE flags to satisfy "+green(xinfo)+"."
6054                         print "!!! One of the following packages is required to complete your request:"
6055                         for pkg, mreasons in show_missing_use:
6056                                 print "- "+pkg.cpv+" ("+", ".join(mreasons)+")"
6057
6058                 elif masked_packages:
6059                         print "\n!!! " + \
6060                                 colorize("BAD", "All ebuilds that could satisfy ") + \
6061                                 colorize("INFORM", xinfo) + \
6062                                 colorize("BAD", " have been masked.")
6063                         print "!!! One of the following masked packages is required to complete your request:"
6064                         have_eapi_mask = show_masked_packages(masked_packages)
6065                         if have_eapi_mask:
6066                                 print
6067                                 msg = ("The current version of portage supports " + \
6068                                         "EAPI '%s'. You must upgrade to a newer version" + \
6069                                         " of portage before EAPI masked packages can" + \
6070                                         " be installed.") % portage.const.EAPI
6071                                 from textwrap import wrap
6072                                 for line in wrap(msg, 75):
6073                                         print line
6074                         print
6075                         show_mask_docs()
6076                 else:
6077                         print "\nemerge: there are no ebuilds to satisfy "+green(xinfo)+"."
6078
6079                 # Show parent nodes and the argument that pulled them in.
6080                 traversed_nodes = set()
6081                 node = myparent
6082                 msg = []
6083                 while node is not None:
6084                         traversed_nodes.add(node)
6085                         msg.append('(dependency required by "%s" [%s])' % \
6086                                 (colorize('INFORM', str(node.cpv)), node.type_name))
6087                         # When traversing to parents, prefer arguments over packages
6088                         # since arguments are root nodes. Never traverse the same
6089                         # package twice, in order to prevent an infinite loop.
6090                         selected_parent = None
6091                         for parent in self.digraph.parent_nodes(node):
6092                                 if isinstance(parent, DependencyArg):
6093                                         msg.append('(dependency required by "%s" [argument])' % \
6094                                                 (colorize('INFORM', str(parent))))
6095                                         selected_parent = None
6096                                         break
6097                                 if parent not in traversed_nodes:
6098                                         selected_parent = parent
6099                         node = selected_parent
6100                 for line in msg:
6101                         print line
6102
6103                 print
6104
6105         def _select_pkg_highest_available(self, root, atom, onlydeps=False):
6106                 cache_key = (root, atom, onlydeps)
6107                 ret = self._highest_pkg_cache.get(cache_key)
6108                 if ret is not None:
6109                         pkg, existing = ret
6110                         if pkg and not existing:
6111                                 existing = self._slot_pkg_map[root].get(pkg.slot_atom)
6112                                 if existing and existing == pkg:
6113                                         # Update the cache to reflect that the
6114                                         # package has been added to the graph.
6115                                         ret = pkg, pkg
6116                                         self._highest_pkg_cache[cache_key] = ret
6117                         return ret
6118                 ret = self._select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)
6119                 self._highest_pkg_cache[cache_key] = ret
6120                 pkg, existing = ret
6121                 if pkg is not None:
6122                         settings = pkg.root_config.settings
6123                         if visible(settings, pkg) and not (pkg.installed and \
6124                                 settings._getMissingKeywords(pkg.cpv, pkg.metadata)):
6125                                 pkg.root_config.visible_pkgs.cpv_inject(pkg)
6126                 return ret
6127
6128         def _select_pkg_highest_available_imp(self, root, atom, onlydeps=False):
6129                 root_config = self.roots[root]
6130                 pkgsettings = self.pkgsettings[root]
6131                 dbs = self._filtered_trees[root]["dbs"]
6132                 vardb = self.roots[root].trees["vartree"].dbapi
6133                 portdb = self.roots[root].trees["porttree"].dbapi
6134                 # List of acceptable packages, ordered by type preference.
6135                 matched_packages = []
6136                 highest_version = None
6137                 if not isinstance(atom, portage.dep.Atom):
6138                         atom = portage.dep.Atom(atom)
6139                 atom_cp = atom.cp
6140                 atom_set = InternalPackageSet(initial_atoms=(atom,))
6141                 existing_node = None
6142                 myeb = None
6143                 usepkgonly = "--usepkgonly" in self.myopts
6144                 empty = "empty" in self.myparams
6145                 selective = "selective" in self.myparams
6146                 reinstall = False
6147                 noreplace = "--noreplace" in self.myopts
6148                 # Behavior of the "selective" parameter depends on
6149                 # whether or not a package matches an argument atom.
6150                 # If an installed package provides an old-style
6151                 # virtual that is no longer provided by an available
6152                 # package, the installed package may match an argument
6153                 # atom even though none of the available packages do.
6154                 # Therefore, "selective" logic does not consider
6155                 # whether or not an installed package matches an
6156                 # argument atom. It only considers whether or not
6157                 # available packages match argument atoms, which is
6158                 # represented by the found_available_arg flag.
6159                 found_available_arg = False
6160                 for find_existing_node in True, False:
6161                         if existing_node:
6162                                 break
6163                         for db, pkg_type, built, installed, db_keys in dbs:
6164                                 if existing_node:
6165                                         break
6166                                 if installed and not find_existing_node:
6167                                         want_reinstall = reinstall or empty or \
6168                                                 (found_available_arg and not selective)
6169                                         if want_reinstall and matched_packages:
6170                                                 continue
6171                                 if hasattr(db, "xmatch"):
6172                                         cpv_list = db.xmatch("match-all", atom)
6173                                 else:
6174                                         cpv_list = db.match(atom)
6175
6176                                 # USE=multislot can make an installed package appear as if
6177                                 # it doesn't satisfy a slot dependency. Rebuilding the ebuild
6178                                 # won't do any good as long as USE=multislot is enabled since
6179                                 # the newly built package still won't have the expected slot.
6180                                 # Therefore, assume that such SLOT dependencies are already
6181                                 # satisfied rather than forcing a rebuild.
6182                                 if installed and not cpv_list and atom.slot:
6183                                         for cpv in db.match(atom.cp):
6184                                                 slot_available = False
6185                                                 for other_db, other_type, other_built, \
6186                                                         other_installed, other_keys in dbs:
6187                                                         try:
6188                                                                 if atom.slot == \
6189                                                                         other_db.aux_get(cpv, ["SLOT"])[0]:
6190                                                                         slot_available = True
6191                                                                         break
6192                                                         except KeyError:
6193                                                                 pass
6194                                                 if not slot_available:
6195                                                         continue
6196                                                 inst_pkg = self._pkg(cpv, "installed",
6197                                                         root_config, installed=installed)
6198                                                 # Remove the slot from the atom and verify that
6199                                                 # the package matches the resulting atom.
6200                                                 atom_without_slot = portage.dep.remove_slot(atom)
6201                                                 if atom.use:
6202                                                         atom_without_slot += str(atom.use)
6203                                                 atom_without_slot = portage.dep.Atom(atom_without_slot)
6204                                                 if portage.match_from_list(
6205                                                         atom_without_slot, [inst_pkg]):
6206                                                         cpv_list = [inst_pkg.cpv]
6207                                                 break
6208
6209                                 if not cpv_list:
6210                                         continue
6211                                 pkg_status = "merge"
6212                                 if installed or onlydeps:
6213                                         pkg_status = "nomerge"
6214                                 # descending order
6215                                 cpv_list.reverse()
6216                                 for cpv in cpv_list:
6217                                         # Make --noreplace take precedence over --newuse.
6218                                         if not installed and noreplace and \
6219                                                 cpv in vardb.match(atom):
6220                                                 # If the installed version is masked, it may
6221                                                 # be necessary to look at lower versions,
6222                                                 # in case there is a visible downgrade.
6223                                                 continue
6224                                         reinstall_for_flags = None
6225                                         cache_key = (pkg_type, root, cpv, pkg_status)
6226                                         calculated_use = True
6227                                         pkg = self._pkg_cache.get(cache_key)
6228                                         if pkg is None:
6229                                                 calculated_use = False
6230                                                 try:
6231                                                         metadata = izip(db_keys, db.aux_get(cpv, db_keys))
6232                                                 except KeyError:
6233                                                         continue
6234                                                 pkg = Package(built=built, cpv=cpv,
6235                                                         installed=installed, metadata=metadata,
6236                                                         onlydeps=onlydeps, root_config=root_config,
6237                                                         type_name=pkg_type)
6238                                                 metadata = pkg.metadata
6239                                                 if not built:
6240                                                         metadata['CHOST'] = pkgsettings.get('CHOST', '')
6241                                                 if not built and ("?" in metadata["LICENSE"] or \
6242                                                         "?" in metadata["PROVIDE"]):
6243                                                         # This is avoided whenever possible because
6244                                                         # it's expensive. It only needs to be done here
6245                                                         # if it has an effect on visibility.
6246                                                         pkgsettings.setcpv(pkg)
6247                                                         metadata["USE"] = pkgsettings["PORTAGE_USE"]
6248                                                         calculated_use = True
6249                                                 self._pkg_cache[pkg] = pkg
6250
6251                                         if not installed or (built and matched_packages):
6252                                                 # Only enforce visibility on installed packages
6253                                                 # if there is at least one other visible package
6254                                                 # available. By filtering installed masked packages
6255                                                 # here, packages that have been masked since they
6256                                                 # were installed can be automatically downgraded
6257                                                 # to an unmasked version.
6258                                                 try:
6259                                                         if not visible(pkgsettings, pkg):
6260                                                                 continue
6261                                                 except portage.exception.InvalidDependString:
6262                                                         if not installed:
6263                                                                 continue
6264
6265                                                 # Enable upgrade or downgrade to a version
6266                                                 # with visible KEYWORDS when the installed
6267                                                 # version is masked by KEYWORDS, but never
6268                                                 # reinstall the same exact version only due
6269                                                 # to a KEYWORDS mask.
6270                                                 if built and matched_packages:
6271
6272                                                         different_version = None
6273                                                         for avail_pkg in matched_packages:
6274                                                                 if not portage.dep.cpvequal(
6275                                                                         pkg.cpv, avail_pkg.cpv):
6276                                                                         different_version = avail_pkg
6277                                                                         break
6278                                                         if different_version is not None:
6279
6280                                                                 if installed and \
6281                                                                         pkgsettings._getMissingKeywords(
6282                                                                         pkg.cpv, pkg.metadata):
6283                                                                         continue
6284
6285                                                                 # If the ebuild no longer exists or it's
6286                                                                 # keywords have been dropped, reject built
6287                                                                 # instances (installed or binary).
6288                                                                 # If --usepkgonly is enabled, assume that
6289                                                                 # the ebuild status should be ignored.
6290                                                                 if not usepkgonly:
6291                                                                         try:
6292                                                                                 pkg_eb = self._pkg(
6293                                                                                         pkg.cpv, "ebuild", root_config)
6294                                                                         except portage.exception.PackageNotFound:
6295                                                                                 continue
6296                                                                         else:
6297                                                                                 if not visible(pkgsettings, pkg_eb):
6298                                                                                         continue
6299
6300                                         if not pkg.built and not calculated_use:
6301                                                 # This is avoided whenever possible because
6302                                                 # it's expensive.
6303                                                 pkgsettings.setcpv(pkg)
6304                                                 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
6305
6306                                         if pkg.cp != atom.cp:
6307                                                 # A cpv can be returned from dbapi.match() as an
6308                                                 # old-style virtual match even in cases when the
6309                                                 # package does not actually PROVIDE the virtual.
6310                                                 # Filter out any such false matches here.
6311                                                 if not atom_set.findAtomForPackage(pkg):
6312                                                         continue
6313
6314                                         myarg = None
6315                                         if root == self.target_root:
6316                                                 try:
6317                                                         # Ebuild USE must have been calculated prior
6318                                                         # to this point, in case atoms have USE deps.
6319                                                         myarg = self._iter_atoms_for_pkg(pkg).next()
6320                                                 except StopIteration:
6321                                                         pass
6322                                                 except portage.exception.InvalidDependString:
6323                                                         if not installed:
6324                                                                 # masked by corruption
6325                                                                 continue
6326                                         if not installed and myarg:
6327                                                 found_available_arg = True
6328
6329                                         if atom.use and not pkg.built:
6330                                                 use = pkg.use.enabled
6331                                                 if atom.use.enabled.difference(use):
6332                                                         continue
6333                                                 if atom.use.disabled.intersection(use):
6334                                                         continue
6335                                         if pkg.cp == atom_cp:
6336                                                 if highest_version is None:
6337                                                         highest_version = pkg
6338                                                 elif pkg > highest_version:
6339                                                         highest_version = pkg
6340                                         # At this point, we've found the highest visible
6341                                         # match from the current repo. Any lower versions
6342                                         # from this repo are ignored, so this so the loop
6343                                         # will always end with a break statement below
6344                                         # this point.
6345                                         if find_existing_node:
6346                                                 e_pkg = self._slot_pkg_map[root].get(pkg.slot_atom)
6347                                                 if not e_pkg:
6348                                                         break
6349                                                 if portage.dep.match_from_list(atom, [e_pkg]):
6350                                                         if highest_version and \
6351                                                                 e_pkg.cp == atom_cp and \
6352                                                                 e_pkg < highest_version and \
6353                                                                 e_pkg.slot_atom != highest_version.slot_atom:
6354                                                                 # There is a higher version available in a
6355                                                                 # different slot, so this existing node is
6356                                                                 # irrelevant.
6357                                                                 pass
6358                                                         else:
6359                                                                 matched_packages.append(e_pkg)
6360                                                                 existing_node = e_pkg
6361                                                 break
6362                                         # Compare built package to current config and
6363                                         # reject the built package if necessary.
6364                                         if built and not installed and \
6365                                                 ("--newuse" in self.myopts or \
6366                                                 "--reinstall" in self.myopts):
6367                                                 iuses = pkg.iuse.all
6368                                                 old_use = pkg.use.enabled
6369                                                 if myeb:
6370                                                         pkgsettings.setcpv(myeb)
6371                                                 else:
6372                                                         pkgsettings.setcpv(pkg)
6373                                                 now_use = pkgsettings["PORTAGE_USE"].split()
6374                                                 forced_flags = set()
6375                                                 forced_flags.update(pkgsettings.useforce)
6376                                                 forced_flags.update(pkgsettings.usemask)
6377                                                 cur_iuse = iuses
6378                                                 if myeb and not usepkgonly:
6379                                                         cur_iuse = myeb.iuse.all
6380                                                 if self._reinstall_for_flags(forced_flags,
6381                                                         old_use, iuses,
6382                                                         now_use, cur_iuse):
6383                                                         break
6384                                         # Compare current config to installed package
6385                                         # and do not reinstall if possible.
6386                                         if not installed and \
6387                                                 ("--newuse" in self.myopts or \
6388                                                 "--reinstall" in self.myopts) and \
6389                                                 cpv in vardb.match(atom):
6390                                                 pkgsettings.setcpv(pkg)
6391                                                 forced_flags = set()
6392                                                 forced_flags.update(pkgsettings.useforce)
6393                                                 forced_flags.update(pkgsettings.usemask)
6394                                                 old_use = vardb.aux_get(cpv, ["USE"])[0].split()
6395                                                 old_iuse = set(filter_iuse_defaults(
6396                                                         vardb.aux_get(cpv, ["IUSE"])[0].split()))
6397                                                 cur_use = pkgsettings["PORTAGE_USE"].split()
6398                                                 cur_iuse = pkg.iuse.all
6399                                                 reinstall_for_flags = \
6400                                                         self._reinstall_for_flags(
6401                                                         forced_flags, old_use, old_iuse,
6402                                                         cur_use, cur_iuse)
6403                                                 if reinstall_for_flags:
6404                                                         reinstall = True
6405                                         if not built:
6406                                                 myeb = pkg
6407                                         matched_packages.append(pkg)
6408                                         if reinstall_for_flags:
6409                                                 self._reinstall_nodes[pkg] = \
6410                                                         reinstall_for_flags
6411                                         break
6412
6413                 if not matched_packages:
6414                         return None, None
6415
6416                 if "--debug" in self.myopts:
6417                         for pkg in matched_packages:
6418                                 portage.writemsg("%s %s\n" % \
6419                                         ((pkg.type_name + ":").rjust(10), pkg.cpv), noiselevel=-1)
6420
6421                 # Filter out any old-style virtual matches if they are
6422                 # mixed with new-style virtual matches.
6423                 cp = portage.dep_getkey(atom)
6424                 if len(matched_packages) > 1 and \
6425                         "virtual" == portage.catsplit(cp)[0]:
6426                         for pkg in matched_packages:
6427                                 if pkg.cp != cp:
6428                                         continue
6429                                 # Got a new-style virtual, so filter
6430                                 # out any old-style virtuals.
6431                                 matched_packages = [pkg for pkg in matched_packages \
6432                                         if pkg.cp == cp]
6433                                 break
6434
6435                 if len(matched_packages) > 1:
6436                         bestmatch = portage.best(
6437                                 [pkg.cpv for pkg in matched_packages])
6438                         matched_packages = [pkg for pkg in matched_packages \
6439                                 if portage.dep.cpvequal(pkg.cpv, bestmatch)]
6440
6441                 # ordered by type preference ("ebuild" type is the last resort)
6442                 return  matched_packages[-1], existing_node
6443
6444         def _select_pkg_from_graph(self, root, atom, onlydeps=False):
6445                 """
6446                 Select packages that have already been added to the graph or
6447                 those that are installed and have not been scheduled for
6448                 replacement.
6449                 """
6450                 graph_db = self._graph_trees[root]["porttree"].dbapi
6451                 matches = graph_db.match_pkgs(atom)
6452                 if not matches:
6453                         return None, None
6454                 pkg = matches[-1] # highest match
6455                 in_graph = self._slot_pkg_map[root].get(pkg.slot_atom)
6456                 return pkg, in_graph
6457
6458         def _complete_graph(self):
6459                 """
6460                 Add any deep dependencies of required sets (args, system, world) that
6461                 have not been pulled into the graph yet. This ensures that the graph
6462                 is consistent such that initially satisfied deep dependencies are not
6463                 broken in the new graph. Initially unsatisfied dependencies are
6464                 irrelevant since we only want to avoid breaking dependencies that are
6465                 intially satisfied.
6466
6467                 Since this method can consume enough time to disturb users, it is
6468                 currently only enabled by the --complete-graph option.
6469                 """
6470                 if "--buildpkgonly" in self.myopts or \
6471                         "recurse" not in self.myparams:
6472                         return 1
6473
6474                 if "complete" not in self.myparams:
6475                         # Skip this to avoid consuming enough time to disturb users.
6476                         return 1
6477
6478                 # Put the depgraph into a mode that causes it to only
6479                 # select packages that have already been added to the
6480                 # graph or those that are installed and have not been
6481                 # scheduled for replacement. Also, toggle the "deep"
6482                 # parameter so that all dependencies are traversed and
6483                 # accounted for.
6484                 self._select_atoms = self._select_atoms_from_graph
6485                 self._select_package = self._select_pkg_from_graph
6486                 already_deep = "deep" in self.myparams
6487                 if not already_deep:
6488                         self.myparams.add("deep")
6489
6490                 for root in self.roots:
6491                         required_set_names = self._required_set_names.copy()
6492                         if root == self.target_root and \
6493                                 (already_deep or "empty" in self.myparams):
6494                                 required_set_names.difference_update(self._sets)
6495                         if not required_set_names and not self._ignored_deps:
6496                                 continue
6497                         root_config = self.roots[root]
6498                         setconfig = root_config.setconfig
6499                         args = []
6500                         # Reuse existing SetArg instances when available.
6501                         for arg in self.digraph.root_nodes():
6502                                 if not isinstance(arg, SetArg):
6503                                         continue
6504                                 if arg.root_config != root_config:
6505                                         continue
6506                                 if arg.name in required_set_names:
6507                                         args.append(arg)
6508                                         required_set_names.remove(arg.name)
6509                         # Create new SetArg instances only when necessary.
6510                         for s in required_set_names:
6511                                 expanded_set = InternalPackageSet(
6512                                         initial_atoms=setconfig.getSetAtoms(s))
6513                                 atom = SETPREFIX + s
6514                                 args.append(SetArg(arg=atom, set=expanded_set,
6515                                         root_config=root_config))
6516                         vardb = root_config.trees["vartree"].dbapi
6517                         for arg in args:
6518                                 for atom in arg.set:
6519                                         self._dep_stack.append(
6520                                                 Dependency(atom=atom, root=root, parent=arg))
6521                         if self._ignored_deps:
6522                                 self._dep_stack.extend(self._ignored_deps)
6523                                 self._ignored_deps = []
6524                         if not self._create_graph(allow_unsatisfied=True):
6525                                 return 0
6526                         # Check the unsatisfied deps to see if any initially satisfied deps
6527                         # will become unsatisfied due to an upgrade. Initially unsatisfied
6528                         # deps are irrelevant since we only want to avoid breaking deps
6529                         # that are initially satisfied.
6530                         while self._unsatisfied_deps:
6531                                 dep = self._unsatisfied_deps.pop()
6532                                 matches = vardb.match_pkgs(dep.atom)
6533                                 if not matches:
6534                                         self._initially_unsatisfied_deps.append(dep)
6535                                         continue
6536                                 # An scheduled installation broke a deep dependency.
6537                                 # Add the installed package to the graph so that it
6538                                 # will be appropriately reported as a slot collision
6539                                 # (possibly solvable via backtracking).
6540                                 pkg = matches[-1] # highest match
6541                                 if not self._add_pkg(pkg, dep):
6542                                         return 0
6543                                 if not self._create_graph(allow_unsatisfied=True):
6544                                         return 0
6545                 return 1
6546
6547         def _pkg(self, cpv, type_name, root_config, installed=False):
6548                 """
6549                 Get a package instance from the cache, or create a new
6550                 one if necessary. Raises KeyError from aux_get if it
6551                 failures for some reason (package does not exist or is
6552                 corrupt).
6553                 """
6554                 operation = "merge"
6555                 if installed:
6556                         operation = "nomerge"
6557                 pkg = self._pkg_cache.get(
6558                         (type_name, root_config.root, cpv, operation))
6559                 if pkg is None:
6560                         tree_type = self.pkg_tree_map[type_name]
6561                         db = root_config.trees[tree_type].dbapi
6562                         db_keys = list(self._trees_orig[root_config.root][
6563                                 tree_type].dbapi._aux_cache_keys)
6564                         try:
6565                                 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
6566                         except KeyError:
6567                                 raise portage.exception.PackageNotFound(cpv)
6568                         pkg = Package(cpv=cpv, metadata=metadata,
6569                                 root_config=root_config, installed=installed)
6570                         if type_name == "ebuild":
6571                                 settings = self.pkgsettings[root_config.root]
6572                                 settings.setcpv(pkg)
6573                                 pkg.metadata["USE"] = settings["PORTAGE_USE"]
6574                                 pkg.metadata['CHOST'] = settings.get('CHOST', '')
6575                         self._pkg_cache[pkg] = pkg
6576                 return pkg
6577
6578         def validate_blockers(self):
6579                 """Remove any blockers from the digraph that do not match any of the
6580                 packages within the graph.  If necessary, create hard deps to ensure
6581                 correct merge order such that mutually blocking packages are never
6582                 installed simultaneously."""
6583
6584                 if "--buildpkgonly" in self.myopts or \
6585                         "--nodeps" in self.myopts:
6586                         return True
6587
6588                 #if "deep" in self.myparams:
6589                 if True:
6590                         # Pull in blockers from all installed packages that haven't already
6591                         # been pulled into the depgraph.  This is not enabled by default
6592                         # due to the performance penalty that is incurred by all the
6593                         # additional dep_check calls that are required.
6594
6595                         dep_keys = ["DEPEND","RDEPEND","PDEPEND"]
6596                         for myroot in self.trees:
6597                                 vardb = self.trees[myroot]["vartree"].dbapi
6598                                 portdb = self.trees[myroot]["porttree"].dbapi
6599                                 pkgsettings = self.pkgsettings[myroot]
6600                                 final_db = self.mydbapi[myroot]
6601
6602                                 blocker_cache = BlockerCache(myroot, vardb)
6603                                 stale_cache = set(blocker_cache)
6604                                 for pkg in vardb:
6605                                         cpv = pkg.cpv
6606                                         stale_cache.discard(cpv)
6607                                         pkg_in_graph = self.digraph.contains(pkg)
6608
6609                                         # Check for masked installed packages. Only warn about
6610                                         # packages that are in the graph in order to avoid warning
6611                                         # about those that will be automatically uninstalled during
6612                                         # the merge process or by --depclean.
6613                                         if pkg in final_db:
6614                                                 if pkg_in_graph and not visible(pkgsettings, pkg):
6615                                                         self._masked_installed.add(pkg)
6616
6617                                         blocker_atoms = None
6618                                         blockers = None
6619                                         if pkg_in_graph:
6620                                                 blockers = []
6621                                                 try:
6622                                                         blockers.extend(
6623                                                                 self._blocker_parents.child_nodes(pkg))
6624                                                 except KeyError:
6625                                                         pass
6626                                                 try:
6627                                                         blockers.extend(
6628                                                                 self._irrelevant_blockers.child_nodes(pkg))
6629                                                 except KeyError:
6630                                                         pass
6631                                         if blockers is not None:
6632                                                 blockers = set(str(blocker.atom) \
6633                                                         for blocker in blockers)
6634
6635                                         # If this node has any blockers, create a "nomerge"
6636                                         # node for it so that they can be enforced.
6637                                         self.spinner.update()
6638                                         blocker_data = blocker_cache.get(cpv)
6639                                         if blocker_data is not None and \
6640                                                 blocker_data.counter != long(pkg.metadata["COUNTER"]):
6641                                                 blocker_data = None
6642
6643                                         # If blocker data from the graph is available, use
6644                                         # it to validate the cache and update the cache if
6645                                         # it seems invalid.
6646                                         if blocker_data is not None and \
6647                                                 blockers is not None:
6648                                                 if not blockers.symmetric_difference(
6649                                                         blocker_data.atoms):
6650                                                         continue
6651                                                 blocker_data = None
6652
6653                                         if blocker_data is None and \
6654                                                 blockers is not None:
6655                                                 # Re-use the blockers from the graph.
6656                                                 blocker_atoms = sorted(blockers)
6657                                                 counter = long(pkg.metadata["COUNTER"])
6658                                                 blocker_data = \
6659                                                         blocker_cache.BlockerData(counter, blocker_atoms)
6660                                                 blocker_cache[pkg.cpv] = blocker_data
6661                                                 continue
6662
6663                                         if blocker_data:
6664                                                 blocker_atoms = blocker_data.atoms
6665                                         else:
6666                                                 # Use aux_get() to trigger FakeVartree global
6667                                                 # updates on *DEPEND when appropriate.
6668                                                 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
6669                                                 # It is crucial to pass in final_db here in order to
6670                                                 # optimize dep_check calls by eliminating atoms via
6671                                                 # dep_wordreduce and dep_eval calls.
6672                                                 try:
6673                                                         portage.dep._dep_check_strict = False
6674                                                         try:
6675                                                                 success, atoms = portage.dep_check(depstr,
6676                                                                         final_db, pkgsettings, myuse=pkg.use.enabled,
6677                                                                         trees=self._graph_trees, myroot=myroot)
6678                                                         except Exception, e:
6679                                                                 if isinstance(e, SystemExit):
6680                                                                         raise
6681                                                                 # This is helpful, for example, if a ValueError
6682                                                                 # is thrown from cpv_expand due to multiple
6683                                                                 # matches (this can happen if an atom lacks a
6684                                                                 # category).
6685                                                                 show_invalid_depstring_notice(
6686                                                                         pkg, depstr, str(e))
6687                                                                 del e
6688                                                                 raise
6689                                                 finally:
6690                                                         portage.dep._dep_check_strict = True
6691                                                 if not success:
6692                                                         replacement_pkg = final_db.match_pkgs(pkg.slot_atom)
6693                                                         if replacement_pkg and \
6694                                                                 replacement_pkg[0].operation == "merge":
6695                                                                 # This package is being replaced anyway, so
6696                                                                 # ignore invalid dependencies so as not to
6697                                                                 # annoy the user too much (otherwise they'd be
6698                                                                 # forced to manually unmerge it first).
6699                                                                 continue
6700                                                         show_invalid_depstring_notice(pkg, depstr, atoms)
6701                                                         return False
6702                                                 blocker_atoms = [myatom for myatom in atoms \
6703                                                         if myatom.startswith("!")]
6704                                                 blocker_atoms.sort()
6705                                                 counter = long(pkg.metadata["COUNTER"])
6706                                                 blocker_cache[cpv] = \
6707                                                         blocker_cache.BlockerData(counter, blocker_atoms)
6708                                         if blocker_atoms:
6709                                                 try:
6710                                                         for atom in blocker_atoms:
6711                                                                 blocker = Blocker(atom=portage.dep.Atom(atom),
6712                                                                         eapi=pkg.metadata["EAPI"], root=myroot)
6713                                                                 self._blocker_parents.add(blocker, pkg)
6714                                                 except portage.exception.InvalidAtom, e:
6715                                                         depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
6716                                                         show_invalid_depstring_notice(
6717                                                                 pkg, depstr, "Invalid Atom: %s" % (e,))
6718                                                         return False
6719                                 for cpv in stale_cache:
6720                                         del blocker_cache[cpv]
6721                                 blocker_cache.flush()
6722                                 del blocker_cache
6723
6724                 # Discard any "uninstall" tasks scheduled by previous calls
6725                 # to this method, since those tasks may not make sense given
6726                 # the current graph state.
6727                 previous_uninstall_tasks = self._blocker_uninstalls.leaf_nodes()
6728                 if previous_uninstall_tasks:
6729                         self._blocker_uninstalls = digraph()
6730                         self.digraph.difference_update(previous_uninstall_tasks)
6731
6732                 for blocker in self._blocker_parents.leaf_nodes():
6733                         self.spinner.update()
6734                         root_config = self.roots[blocker.root]
6735                         virtuals = root_config.settings.getvirtuals()
6736                         myroot = blocker.root
6737                         initial_db = self.trees[myroot]["vartree"].dbapi
6738                         final_db = self.mydbapi[myroot]
6739                         
6740                         provider_virtual = False
6741                         if blocker.cp in virtuals and \
6742                                 not self._have_new_virt(blocker.root, blocker.cp):
6743                                 provider_virtual = True
6744
6745                         if provider_virtual:
6746                                 atoms = []
6747                                 for provider_entry in virtuals[blocker.cp]:
6748                                         provider_cp = \
6749                                                 portage.dep_getkey(provider_entry)
6750                                         atoms.append(blocker.atom.replace(
6751                                                 blocker.cp, provider_cp))
6752                         else:
6753                                 atoms = [blocker.atom]
6754
6755                         blocked_initial = []
6756                         for atom in atoms:
6757                                 blocked_initial.extend(initial_db.match_pkgs(atom))
6758
6759                         blocked_final = []
6760                         for atom in atoms:
6761                                 blocked_final.extend(final_db.match_pkgs(atom))
6762
6763                         if not blocked_initial and not blocked_final:
6764                                 parent_pkgs = self._blocker_parents.parent_nodes(blocker)
6765                                 self._blocker_parents.remove(blocker)
6766                                 # Discard any parents that don't have any more blockers.
6767                                 for pkg in parent_pkgs:
6768                                         self._irrelevant_blockers.add(blocker, pkg)
6769                                         if not self._blocker_parents.child_nodes(pkg):
6770                                                 self._blocker_parents.remove(pkg)
6771                                 continue
6772                         for parent in self._blocker_parents.parent_nodes(blocker):
6773                                 unresolved_blocks = False
6774                                 depends_on_order = set()
6775                                 for pkg in blocked_initial:
6776                                         if pkg.slot_atom == parent.slot_atom:
6777                                                 # TODO: Support blocks within slots in cases where it
6778                                                 # might make sense.  For example, a new version might
6779                                                 # require that the old version be uninstalled at build
6780                                                 # time.
6781                                                 continue
6782                                         if parent.installed:
6783                                                 # Two currently installed packages conflict with
6784                                                 # eachother. Ignore this case since the damage
6785                                                 # is already done and this would be likely to
6786                                                 # confuse users if displayed like a normal blocker.
6787                                                 continue
6788
6789                                         self._blocked_pkgs.add(pkg, blocker)
6790
6791                                         if parent.operation == "merge":
6792                                                 # Maybe the blocked package can be replaced or simply
6793                                                 # unmerged to resolve this block.
6794                                                 depends_on_order.add((pkg, parent))
6795                                                 continue
6796                                         # None of the above blocker resolutions techniques apply,
6797                                         # so apparently this one is unresolvable.
6798                                         unresolved_blocks = True
6799                                 for pkg in blocked_final:
6800                                         if pkg.slot_atom == parent.slot_atom:
6801                                                 # TODO: Support blocks within slots.
6802                                                 continue
6803                                         if parent.operation == "nomerge" and \
6804                                                 pkg.operation == "nomerge":
6805                                                 # This blocker will be handled the next time that a
6806                                                 # merge of either package is triggered.
6807                                                 continue
6808
6809                                         self._blocked_pkgs.add(pkg, blocker)
6810
6811                                         # Maybe the blocking package can be
6812                                         # unmerged to resolve this block.
6813                                         if parent.operation == "merge" and pkg.installed:
6814                                                 depends_on_order.add((pkg, parent))
6815                                                 continue
6816                                         elif parent.operation == "nomerge":
6817                                                 depends_on_order.add((parent, pkg))
6818                                                 continue
6819                                         # None of the above blocker resolutions techniques apply,
6820                                         # so apparently this one is unresolvable.
6821                                         unresolved_blocks = True
6822
6823                                 # Make sure we don't unmerge any package that have been pulled
6824                                 # into the graph.
6825                                 if not unresolved_blocks and depends_on_order:
6826                                         for inst_pkg, inst_task in depends_on_order:
6827                                                 if self.digraph.contains(inst_pkg) and \
6828                                                         self.digraph.parent_nodes(inst_pkg):
6829                                                         unresolved_blocks = True
6830                                                         break
6831
6832                                 if not unresolved_blocks and depends_on_order:
6833                                         for inst_pkg, inst_task in depends_on_order:
6834                                                 uninst_task = Package(built=inst_pkg.built,
6835                                                         cpv=inst_pkg.cpv, installed=inst_pkg.installed,
6836                                                         metadata=inst_pkg.metadata,
6837                                                         operation="uninstall",
6838                                                         root_config=inst_pkg.root_config,
6839                                                         type_name=inst_pkg.type_name)
6840                                                 self._pkg_cache[uninst_task] = uninst_task
6841                                                 # Enforce correct merge order with a hard dep.
6842                                                 self.digraph.addnode(uninst_task, inst_task,
6843                                                         priority=BlockerDepPriority.instance)
6844                                                 # Count references to this blocker so that it can be
6845                                                 # invalidated after nodes referencing it have been
6846                                                 # merged.
6847                                                 self._blocker_uninstalls.addnode(uninst_task, blocker)
6848                                 if not unresolved_blocks and not depends_on_order:
6849                                         self._irrelevant_blockers.add(blocker, parent)
6850                                         self._blocker_parents.remove_edge(blocker, parent)
6851                                         if not self._blocker_parents.parent_nodes(blocker):
6852                                                 self._blocker_parents.remove(blocker)
6853                                         if not self._blocker_parents.child_nodes(parent):
6854                                                 self._blocker_parents.remove(parent)
6855                                 if unresolved_blocks:
6856                                         self._unsolvable_blockers.add(blocker, parent)
6857
6858                 return True
6859
6860         def _accept_blocker_conflicts(self):
6861                 acceptable = False
6862                 for x in ("--buildpkgonly", "--fetchonly",
6863                         "--fetch-all-uri", "--nodeps"):
6864                         if x in self.myopts:
6865                                 acceptable = True
6866                                 break
6867                 return acceptable
6868
6869         def _merge_order_bias(self, mygraph):
6870                 """
6871                 For optimal leaf node selection, promote deep system runtime deps and
6872                 order nodes from highest to lowest overall reference count.
6873                 """
6874
6875                 node_info = {}
6876                 for node in mygraph.order:
6877                         node_info[node] = len(mygraph.parent_nodes(node))
6878                 deep_system_deps = _find_deep_system_runtime_deps(mygraph)
6879
6880                 def cmp_merge_preference(node1, node2):
6881
6882                         if node1.operation == 'uninstall':
6883                                 if node2.operation == 'uninstall':
6884                                         return 0
6885                                 return 1
6886
6887                         if node2.operation == 'uninstall':
6888                                 if node1.operation == 'uninstall':
6889                                         return 0
6890                                 return -1
6891
6892                         node1_sys = node1 in deep_system_deps
6893                         node2_sys = node2 in deep_system_deps
6894                         if node1_sys != node2_sys:
6895                                 if node1_sys:
6896                                         return -1
6897                                 return 1
6898
6899                         return node_info[node2] - node_info[node1]
6900
6901                 mygraph.order.sort(key=cmp_sort_key(cmp_merge_preference))
6902
6903         def altlist(self, reversed=False):
6904
6905                 while self._serialized_tasks_cache is None:
6906                         self._resolve_conflicts()
6907                         try:
6908                                 self._serialized_tasks_cache, self._scheduler_graph = \
6909                                         self._serialize_tasks()
6910                         except self._serialize_tasks_retry:
6911                                 pass
6912
6913                 retlist = self._serialized_tasks_cache[:]
6914                 if reversed:
6915                         retlist.reverse()
6916                 return retlist
6917
6918         def schedulerGraph(self):
6919                 """
6920                 The scheduler graph is identical to the normal one except that
6921                 uninstall edges are reversed in specific cases that require
6922                 conflicting packages to be temporarily installed simultaneously.
6923                 This is intended for use by the Scheduler in it's parallelization
6924                 logic. It ensures that temporary simultaneous installation of
6925                 conflicting packages is avoided when appropriate (especially for
6926                 !!atom blockers), but allowed in specific cases that require it.
6927
6928                 Note that this method calls break_refs() which alters the state of
6929                 internal Package instances such that this depgraph instance should
6930                 not be used to perform any more calculations.
6931                 """
6932                 if self._scheduler_graph is None:
6933                         self.altlist()
6934                 self.break_refs(self._scheduler_graph.order)
6935                 return self._scheduler_graph
6936
6937         def break_refs(self, nodes):
6938                 """
6939                 Take a mergelist like that returned from self.altlist() and
6940                 break any references that lead back to the depgraph. This is
6941                 useful if you want to hold references to packages without
6942                 also holding the depgraph on the heap.
6943                 """
6944                 for node in nodes:
6945                         if hasattr(node, "root_config"):
6946                                 # The FakeVartree references the _package_cache which
6947                                 # references the depgraph. So that Package instances don't
6948                                 # hold the depgraph and FakeVartree on the heap, replace
6949                                 # the RootConfig that references the FakeVartree with the
6950                                 # original RootConfig instance which references the actual
6951                                 # vartree.
6952                                 node.root_config = \
6953                                         self._trees_orig[node.root_config.root]["root_config"]
6954
6955         def _resolve_conflicts(self):
6956                 if not self._complete_graph():
6957                         raise self._unknown_internal_error()
6958
6959                 if not self.validate_blockers():
6960                         raise self._unknown_internal_error()
6961
6962                 if self._slot_collision_info:
6963                         self._process_slot_conflicts()
6964
6965         def _serialize_tasks(self):
6966
6967                 if "--debug" in self.myopts:
6968                         writemsg("\ndigraph:\n\n", noiselevel=-1)
6969                         self.digraph.debug_print()
6970                         writemsg("\n", noiselevel=-1)
6971
6972                 scheduler_graph = self.digraph.copy()
6973                 mygraph=self.digraph.copy()
6974                 # Prune "nomerge" root nodes if nothing depends on them, since
6975                 # otherwise they slow down merge order calculation. Don't remove
6976                 # non-root nodes since they help optimize merge order in some cases
6977                 # such as revdep-rebuild.
6978                 removed_nodes = set()
6979                 while True:
6980                         for node in mygraph.root_nodes():
6981                                 if not isinstance(node, Package) or \
6982                                         node.installed or node.onlydeps:
6983                                         removed_nodes.add(node)
6984                         if removed_nodes:
6985                                 self.spinner.update()
6986                                 mygraph.difference_update(removed_nodes)
6987                         if not removed_nodes:
6988                                 break
6989                         removed_nodes.clear()
6990                 self._merge_order_bias(mygraph)
6991                 def cmp_circular_bias(n1, n2):
6992                         """
6993                         RDEPEND is stronger than PDEPEND and this function
6994                         measures such a strength bias within a circular
6995                         dependency relationship.
6996                         """
6997                         n1_n2_medium = n2 in mygraph.child_nodes(n1,
6998                                 ignore_priority=priority_range.ignore_medium_soft)
6999                         n2_n1_medium = n1 in mygraph.child_nodes(n2,
7000                                 ignore_priority=priority_range.ignore_medium_soft)
7001                         if n1_n2_medium == n2_n1_medium:
7002                                 return 0
7003                         elif n1_n2_medium:
7004                                 return 1
7005                         return -1
7006                 myblocker_uninstalls = self._blocker_uninstalls.copy()
7007                 retlist=[]
7008                 # Contains uninstall tasks that have been scheduled to
7009                 # occur after overlapping blockers have been installed.
7010                 scheduled_uninstalls = set()
7011                 # Contains any Uninstall tasks that have been ignored
7012                 # in order to avoid the circular deps code path. These
7013                 # correspond to blocker conflicts that could not be
7014                 # resolved.
7015                 ignored_uninstall_tasks = set()
7016                 have_uninstall_task = False
7017                 complete = "complete" in self.myparams
7018                 asap_nodes = []
7019
7020                 def get_nodes(**kwargs):
7021                         """
7022                         Returns leaf nodes excluding Uninstall instances
7023                         since those should be executed as late as possible.
7024                         """
7025                         return [node for node in mygraph.leaf_nodes(**kwargs) \
7026                                 if isinstance(node, Package) and \
7027                                         (node.operation != "uninstall" or \
7028                                         node in scheduled_uninstalls)]
7029
7030                 # sys-apps/portage needs special treatment if ROOT="/"
7031                 running_root = self._running_root.root
7032                 from portage.const import PORTAGE_PACKAGE_ATOM
7033                 runtime_deps = InternalPackageSet(
7034                         initial_atoms=[PORTAGE_PACKAGE_ATOM])
7035                 running_portage = self.trees[running_root]["vartree"].dbapi.match_pkgs(
7036                         PORTAGE_PACKAGE_ATOM)
7037                 replacement_portage = self.mydbapi[running_root].match_pkgs(
7038                         PORTAGE_PACKAGE_ATOM)
7039
7040                 if running_portage:
7041                         running_portage = running_portage[0]
7042                 else:
7043                         running_portage = None
7044
7045                 if replacement_portage:
7046                         replacement_portage = replacement_portage[0]
7047                 else:
7048                         replacement_portage = None
7049
7050                 if replacement_portage == running_portage:
7051                         replacement_portage = None
7052
7053                 if replacement_portage is not None:
7054                         # update from running_portage to replacement_portage asap
7055                         asap_nodes.append(replacement_portage)
7056
7057                 if running_portage is not None:
7058                         try:
7059                                 portage_rdepend = self._select_atoms_highest_available(
7060                                         running_root, running_portage.metadata["RDEPEND"],
7061                                         myuse=running_portage.use.enabled,
7062                                         parent=running_portage, strict=False)
7063                         except portage.exception.InvalidDependString, e:
7064                                 portage.writemsg("!!! Invalid RDEPEND in " + \
7065                                         "'%svar/db/pkg/%s/RDEPEND': %s\n" % \
7066                                         (running_root, running_portage.cpv, e), noiselevel=-1)
7067                                 del e
7068                                 portage_rdepend = []
7069                         runtime_deps.update(atom for atom in portage_rdepend \
7070                                 if not atom.startswith("!"))
7071
7072                 def gather_deps(ignore_priority, mergeable_nodes,
7073                         selected_nodes, node):
7074                         """
7075                         Recursively gather a group of nodes that RDEPEND on
7076                         eachother. This ensures that they are merged as a group
7077                         and get their RDEPENDs satisfied as soon as possible.
7078                         """
7079                         if node in selected_nodes:
7080                                 return True
7081                         if node not in mergeable_nodes:
7082                                 return False
7083                         if node == replacement_portage and \
7084                                 mygraph.child_nodes(node,
7085                                 ignore_priority=priority_range.ignore_medium_soft):
7086                                 # Make sure that portage always has all of it's
7087                                 # RDEPENDs installed first.
7088                                 return False
7089                         selected_nodes.add(node)
7090                         for child in mygraph.child_nodes(node,
7091                                 ignore_priority=ignore_priority):
7092                                 if not gather_deps(ignore_priority,
7093                                         mergeable_nodes, selected_nodes, child):
7094                                         return False
7095                         return True
7096
7097                 def ignore_uninst_or_med(priority):
7098                         if priority is BlockerDepPriority.instance:
7099                                 return True
7100                         return priority_range.ignore_medium(priority)
7101
7102                 def ignore_uninst_or_med_soft(priority):
7103                         if priority is BlockerDepPriority.instance:
7104                                 return True
7105                         return priority_range.ignore_medium_soft(priority)
7106
7107                 tree_mode = "--tree" in self.myopts
7108                 # Tracks whether or not the current iteration should prefer asap_nodes
7109                 # if available.  This is set to False when the previous iteration
7110                 # failed to select any nodes.  It is reset whenever nodes are
7111                 # successfully selected.
7112                 prefer_asap = True
7113
7114                 # Controls whether or not the current iteration should drop edges that
7115                 # are "satisfied" by installed packages, in order to solve circular
7116                 # dependencies. The deep runtime dependencies of installed packages are
7117                 # not checked in this case (bug #199856), so it must be avoided
7118                 # whenever possible.
7119                 drop_satisfied = False
7120
7121                 # State of variables for successive iterations that loosen the
7122                 # criteria for node selection.
7123                 #
7124                 # iteration   prefer_asap   drop_satisfied
7125                 # 1           True          False
7126                 # 2           False         False
7127                 # 3           False         True
7128                 #
7129                 # If no nodes are selected on the last iteration, it is due to
7130                 # unresolved blockers or circular dependencies.
7131
7132                 while not mygraph.empty():
7133                         self.spinner.update()
7134                         selected_nodes = None
7135                         ignore_priority = None
7136                         if drop_satisfied or (prefer_asap and asap_nodes):
7137                                 priority_range = DepPrioritySatisfiedRange
7138                         else:
7139                                 priority_range = DepPriorityNormalRange
7140                         if prefer_asap and asap_nodes:
7141                                 # ASAP nodes are merged before their soft deps. Go ahead and
7142                                 # select root nodes here if necessary, since it's typical for
7143                                 # the parent to have been removed from the graph already.
7144                                 asap_nodes = [node for node in asap_nodes \
7145                                         if mygraph.contains(node)]
7146                                 for node in asap_nodes:
7147                                         if not mygraph.child_nodes(node,
7148                                                 ignore_priority=priority_range.ignore_soft):
7149                                                 selected_nodes = [node]
7150                                                 asap_nodes.remove(node)
7151                                                 break
7152                         if not selected_nodes and \
7153                                 not (prefer_asap and asap_nodes):
7154                                 for i in xrange(priority_range.NONE,
7155                                         priority_range.MEDIUM_SOFT + 1):
7156                                         ignore_priority = priority_range.ignore_priority[i]
7157                                         nodes = get_nodes(ignore_priority=ignore_priority)
7158                                         if nodes:
7159                                                 # If there is a mix of uninstall nodes with other
7160                                                 # types, save the uninstall nodes for later since
7161                                                 # sometimes a merge node will render an uninstall
7162                                                 # node unnecessary (due to occupying the same slot),
7163                                                 # and we want to avoid executing a separate uninstall
7164                                                 # task in that case.
7165                                                 if len(nodes) > 1:
7166                                                         good_uninstalls = []
7167                                                         with_some_uninstalls_excluded = []
7168                                                         for node in nodes:
7169                                                                 if node.operation == "uninstall":
7170                                                                         slot_node = self.mydbapi[node.root
7171                                                                                 ].match_pkgs(node.slot_atom)
7172                                                                         if slot_node and \
7173                                                                                 slot_node[0].operation == "merge":
7174                                                                                 continue
7175                                                                         good_uninstalls.append(node)
7176                                                                 with_some_uninstalls_excluded.append(node)
7177                                                         if good_uninstalls:
7178                                                                 nodes = good_uninstalls
7179                                                         elif with_some_uninstalls_excluded:
7180                                                                 nodes = with_some_uninstalls_excluded
7181                                                         else:
7182                                                                 nodes = nodes
7183
7184                                                 if ignore_priority is None and not tree_mode:
7185                                                         # Greedily pop all of these nodes since no
7186                                                         # relationship has been ignored. This optimization
7187                                                         # destroys --tree output, so it's disabled in tree
7188                                                         # mode.
7189                                                         selected_nodes = nodes
7190                                                 else:
7191                                                         # For optimal merge order:
7192                                                         #  * Only pop one node.
7193                                                         #  * Removing a root node (node without a parent)
7194                                                         #    will not produce a leaf node, so avoid it.
7195                                                         #  * It's normal for a selected uninstall to be a
7196                                                         #    root node, so don't check them for parents.
7197                                                         for node in nodes:
7198                                                                 if node.operation == "uninstall" or \
7199                                                                         mygraph.parent_nodes(node):
7200                                                                         selected_nodes = [node]
7201                                                                         break
7202
7203                                                 if selected_nodes:
7204                                                         break
7205
7206                         if not selected_nodes:
7207                                 nodes = get_nodes(ignore_priority=priority_range.ignore_medium)
7208                                 if nodes:
7209                                         mergeable_nodes = set(nodes)
7210                                         if prefer_asap and asap_nodes:
7211                                                 nodes = asap_nodes
7212                                         for i in xrange(priority_range.SOFT,
7213                                                 priority_range.MEDIUM_SOFT + 1):
7214                                                 ignore_priority = priority_range.ignore_priority[i]
7215                                                 for node in nodes:
7216                                                         if not mygraph.parent_nodes(node):
7217                                                                 continue
7218                                                         selected_nodes = set()
7219                                                         if gather_deps(ignore_priority,
7220                                                                 mergeable_nodes, selected_nodes, node):
7221                                                                 break
7222                                                         else:
7223                                                                 selected_nodes = None
7224                                                 if selected_nodes:
7225                                                         break
7226
7227                                         if prefer_asap and asap_nodes and not selected_nodes:
7228                                                 # We failed to find any asap nodes to merge, so ignore
7229                                                 # them for the next iteration.
7230                                                 prefer_asap = False
7231                                                 continue
7232
7233                         if selected_nodes and ignore_priority is not None:
7234                                 # Try to merge ignored medium_soft deps as soon as possible
7235                                 # if they're not satisfied by installed packages.
7236                                 for node in selected_nodes:
7237                                         children = set(mygraph.child_nodes(node))
7238                                         soft = children.difference(
7239                                                 mygraph.child_nodes(node,
7240                                                 ignore_priority=DepPrioritySatisfiedRange.ignore_soft))
7241                                         medium_soft = children.difference(
7242                                                 mygraph.child_nodes(node,
7243                                                         ignore_priority = \
7244                                                         DepPrioritySatisfiedRange.ignore_medium_soft))
7245                                         medium_soft.difference_update(soft)
7246                                         for child in medium_soft:
7247                                                 if child in selected_nodes:
7248                                                         continue
7249                                                 if child in asap_nodes:
7250                                                         continue
7251                                                 asap_nodes.append(child)
7252
7253                         if selected_nodes and len(selected_nodes) > 1:
7254                                 if not isinstance(selected_nodes, list):
7255                                         selected_nodes = list(selected_nodes)
7256                                 selected_nodes.sort(key=cmp_sort_key(cmp_circular_bias))
7257
7258                         if not selected_nodes and not myblocker_uninstalls.is_empty():
7259                                 # An Uninstall task needs to be executed in order to
7260                                 # avoid conflict if possible.
7261
7262                                 if drop_satisfied:
7263                                         priority_range = DepPrioritySatisfiedRange
7264                                 else:
7265                                         priority_range = DepPriorityNormalRange
7266
7267                                 mergeable_nodes = get_nodes(
7268                                         ignore_priority=ignore_uninst_or_med)
7269
7270                                 min_parent_deps = None
7271                                 uninst_task = None
7272                                 for task in myblocker_uninstalls.leaf_nodes():
7273                                         # Do some sanity checks so that system or world packages
7274                                         # don't get uninstalled inappropriately here (only really
7275                                         # necessary when --complete-graph has not been enabled).
7276
7277                                         if task in ignored_uninstall_tasks:
7278                                                 continue
7279
7280                                         if task in scheduled_uninstalls:
7281                                                 # It's been scheduled but it hasn't
7282                                                 # been executed yet due to dependence
7283                                                 # on installation of blocking packages.
7284                                                 continue
7285
7286                                         root_config = self.roots[task.root]
7287                                         inst_pkg = self._pkg_cache[
7288                                                 ("installed", task.root, task.cpv, "nomerge")]
7289
7290                                         if self.digraph.contains(inst_pkg):
7291                                                 continue
7292
7293                                         forbid_overlap = False
7294                                         heuristic_overlap = False
7295                                         for blocker in myblocker_uninstalls.parent_nodes(task):
7296                                                 if blocker.eapi in ("0", "1"):
7297                                                         heuristic_overlap = True
7298                                                 elif blocker.atom.blocker.overlap.forbid:
7299                                                         forbid_overlap = True
7300                                                         break
7301                                         if forbid_overlap and running_root == task.root:
7302                                                 continue
7303
7304                                         if heuristic_overlap and running_root == task.root:
7305                                                 # Never uninstall sys-apps/portage or it's essential
7306                                                 # dependencies, except through replacement.
7307                                                 try:
7308                                                         runtime_dep_atoms = \
7309                                                                 list(runtime_deps.iterAtomsForPackage(task))
7310                                                 except portage.exception.InvalidDependString, e:
7311                                                         portage.writemsg("!!! Invalid PROVIDE in " + \
7312                                                                 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
7313                                                                 (task.root, task.cpv, e), noiselevel=-1)
7314                                                         del e
7315                                                         continue
7316
7317                                                 # Don't uninstall a runtime dep if it appears
7318                                                 # to be the only suitable one installed.
7319                                                 skip = False
7320                                                 vardb = root_config.trees["vartree"].dbapi
7321                                                 for atom in runtime_dep_atoms:
7322                                                         other_version = None
7323                                                         for pkg in vardb.match_pkgs(atom):
7324                                                                 if pkg.cpv == task.cpv and \
7325                                                                         pkg.metadata["COUNTER"] == \
7326                                                                         task.metadata["COUNTER"]:
7327                                                                         continue
7328                                                                 other_version = pkg
7329                                                                 break
7330                                                         if other_version is None:
7331                                                                 skip = True
7332                                                                 break
7333                                                 if skip:
7334                                                         continue
7335
7336                                                 # For packages in the system set, don't take
7337                                                 # any chances. If the conflict can't be resolved
7338                                                 # by a normal replacement operation then abort.
7339                                                 skip = False
7340                                                 try:
7341                                                         for atom in root_config.sets[
7342                                                                 "system"].iterAtomsForPackage(task):
7343                                                                 skip = True
7344                                                                 break
7345                                                 except portage.exception.InvalidDependString, e:
7346                                                         portage.writemsg("!!! Invalid PROVIDE in " + \
7347                                                                 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
7348                                                                 (task.root, task.cpv, e), noiselevel=-1)
7349                                                         del e
7350                                                         skip = True
7351                                                 if skip:
7352                                                         continue
7353
7354                                         # Note that the world check isn't always
7355                                         # necessary since self._complete_graph() will
7356                                         # add all packages from the system and world sets to the
7357                                         # graph. This just allows unresolved conflicts to be
7358                                         # detected as early as possible, which makes it possible
7359                                         # to avoid calling self._complete_graph() when it is
7360                                         # unnecessary due to blockers triggering an abortion.
7361                                         if not complete:
7362                                                 # For packages in the world set, go ahead an uninstall
7363                                                 # when necessary, as long as the atom will be satisfied
7364                                                 # in the final state.
7365                                                 graph_db = self.mydbapi[task.root]
7366                                                 skip = False
7367                                                 try:
7368                                                         for atom in root_config.sets[
7369                                                                 "world"].iterAtomsForPackage(task):
7370                                                                 satisfied = False
7371                                                                 for pkg in graph_db.match_pkgs(atom):
7372                                                                         if pkg == inst_pkg:
7373                                                                                 continue
7374                                                                         satisfied = True
7375                                                                         break
7376                                                                 if not satisfied:
7377                                                                         skip = True
7378                                                                         self._blocked_world_pkgs[inst_pkg] = atom
7379                                                                         break
7380                                                 except portage.exception.InvalidDependString, e:
7381                                                         portage.writemsg("!!! Invalid PROVIDE in " + \
7382                                                                 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
7383                                                                 (task.root, task.cpv, e), noiselevel=-1)
7384                                                         del e
7385                                                         skip = True
7386                                                 if skip:
7387                                                         continue
7388
7389                                         # Check the deps of parent nodes to ensure that
7390                                         # the chosen task produces a leaf node. Maybe
7391                                         # this can be optimized some more to make the
7392                                         # best possible choice, but the current algorithm
7393                                         # is simple and should be near optimal for most
7394                                         # common cases.
7395                                         mergeable_parent = False
7396                                         parent_deps = set()
7397                                         for parent in mygraph.parent_nodes(task):
7398                                                 parent_deps.update(mygraph.child_nodes(parent,
7399                                                         ignore_priority=priority_range.ignore_medium_soft))
7400                                                 if parent in mergeable_nodes and \
7401                                                         gather_deps(ignore_uninst_or_med_soft,
7402                                                         mergeable_nodes, set(), parent):
7403                                                         mergeable_parent = True
7404
7405                                         if not mergeable_parent:
7406                                                 continue
7407
7408                                         parent_deps.remove(task)
7409                                         if min_parent_deps is None or \
7410                                                 len(parent_deps) < min_parent_deps:
7411                                                 min_parent_deps = len(parent_deps)
7412                                                 uninst_task = task
7413
7414                                 if uninst_task is not None:
7415                                         # The uninstall is performed only after blocking
7416                                         # packages have been merged on top of it. File
7417                                         # collisions between blocking packages are detected
7418                                         # and removed from the list of files to be uninstalled.
7419                                         scheduled_uninstalls.add(uninst_task)
7420                                         parent_nodes = mygraph.parent_nodes(uninst_task)
7421
7422                                         # Reverse the parent -> uninstall edges since we want
7423                                         # to do the uninstall after blocking packages have
7424                                         # been merged on top of it.
7425                                         mygraph.remove(uninst_task)
7426                                         for blocked_pkg in parent_nodes:
7427                                                 mygraph.add(blocked_pkg, uninst_task,
7428                                                         priority=BlockerDepPriority.instance)
7429                                                 scheduler_graph.remove_edge(uninst_task, blocked_pkg)
7430                                                 scheduler_graph.add(blocked_pkg, uninst_task,
7431                                                         priority=BlockerDepPriority.instance)
7432
7433                                         # Reset the state variables for leaf node selection and
7434                                         # continue trying to select leaf nodes.
7435                                         prefer_asap = True
7436                                         drop_satisfied = False
7437                                         continue
7438
7439                         if not selected_nodes:
7440                                 # Only select root nodes as a last resort. This case should
7441                                 # only trigger when the graph is nearly empty and the only
7442                                 # remaining nodes are isolated (no parents or children). Since
7443                                 # the nodes must be isolated, ignore_priority is not needed.
7444                                 selected_nodes = get_nodes()
7445
7446                         if not selected_nodes and not drop_satisfied:
7447                                 drop_satisfied = True
7448                                 continue
7449
7450                         if not selected_nodes and not myblocker_uninstalls.is_empty():
7451                                 # If possible, drop an uninstall task here in order to avoid
7452                                 # the circular deps code path. The corresponding blocker will
7453                                 # still be counted as an unresolved conflict.
7454                                 uninst_task = None
7455                                 for node in myblocker_uninstalls.leaf_nodes():
7456                                         try:
7457                                                 mygraph.remove(node)
7458                                         except KeyError:
7459                                                 pass
7460                                         else:
7461                                                 uninst_task = node
7462                                                 ignored_uninstall_tasks.add(node)
7463                                                 break
7464
7465                                 if uninst_task is not None:
7466                                         # Reset the state variables for leaf node selection and
7467                                         # continue trying to select leaf nodes.
7468                                         prefer_asap = True
7469                                         drop_satisfied = False
7470                                         continue
7471
7472                         if not selected_nodes:
7473                                 self._circular_deps_for_display = mygraph
7474                                 raise self._unknown_internal_error()
7475
7476                         # At this point, we've succeeded in selecting one or more nodes, so
7477                         # reset state variables for leaf node selection.
7478                         prefer_asap = True
7479                         drop_satisfied = False
7480
7481                         mygraph.difference_update(selected_nodes)
7482
7483                         for node in selected_nodes:
7484                                 if isinstance(node, Package) and \
7485                                         node.operation == "nomerge":
7486                                         continue
7487
7488                                 # Handle interactions between blockers
7489                                 # and uninstallation tasks.
7490                                 solved_blockers = set()
7491                                 uninst_task = None
7492                                 if isinstance(node, Package) and \
7493                                         "uninstall" == node.operation:
7494                                         have_uninstall_task = True
7495                                         uninst_task = node
7496                                 else:
7497                                         vardb = self.trees[node.root]["vartree"].dbapi
7498                                         previous_cpv = vardb.match(node.slot_atom)
7499                                         if previous_cpv:
7500                                                 # The package will be replaced by this one, so remove
7501                                                 # the corresponding Uninstall task if necessary.
7502                                                 previous_cpv = previous_cpv[0]
7503                                                 uninst_task = \
7504                                                         ("installed", node.root, previous_cpv, "uninstall")
7505                                                 try:
7506                                                         mygraph.remove(uninst_task)
7507                                                 except KeyError:
7508                                                         pass
7509
7510                                 if uninst_task is not None and \
7511                                         uninst_task not in ignored_uninstall_tasks and \
7512                                         myblocker_uninstalls.contains(uninst_task):
7513                                         blocker_nodes = myblocker_uninstalls.parent_nodes(uninst_task)
7514                                         myblocker_uninstalls.remove(uninst_task)
7515                                         # Discard any blockers that this Uninstall solves.
7516                                         for blocker in blocker_nodes:
7517                                                 if not myblocker_uninstalls.child_nodes(blocker):
7518                                                         myblocker_uninstalls.remove(blocker)
7519                                                         solved_blockers.add(blocker)
7520
7521                                 retlist.append(node)
7522
7523                                 if (isinstance(node, Package) and \
7524                                         "uninstall" == node.operation) or \
7525                                         (uninst_task is not None and \
7526                                         uninst_task in scheduled_uninstalls):
7527                                         # Include satisfied blockers in the merge list
7528                                         # since the user might be interested and also
7529                                         # it serves as an indicator that blocking packages
7530                                         # will be temporarily installed simultaneously.
7531                                         for blocker in solved_blockers:
7532                                                 retlist.append(Blocker(atom=blocker.atom,
7533                                                         root=blocker.root, eapi=blocker.eapi,
7534                                                         satisfied=True))
7535
7536                 unsolvable_blockers = set(self._unsolvable_blockers.leaf_nodes())
7537                 for node in myblocker_uninstalls.root_nodes():
7538                         unsolvable_blockers.add(node)
7539
7540                 for blocker in unsolvable_blockers:
7541                         retlist.append(blocker)
7542
7543                 # If any Uninstall tasks need to be executed in order
7544                 # to avoid a conflict, complete the graph with any
7545                 # dependencies that may have been initially
7546                 # neglected (to ensure that unsafe Uninstall tasks
7547                 # are properly identified and blocked from execution).
7548                 if have_uninstall_task and \
7549                         not complete and \
7550                         not unsolvable_blockers:
7551                         self.myparams.add("complete")
7552                         raise self._serialize_tasks_retry("")
7553
7554                 if unsolvable_blockers and \
7555                         not self._accept_blocker_conflicts():
7556                         self._unsatisfied_blockers_for_display = unsolvable_blockers
7557                         self._serialized_tasks_cache = retlist[:]
7558                         self._scheduler_graph = scheduler_graph
7559                         raise self._unknown_internal_error()
7560
7561                 if self._slot_collision_info and \
7562                         not self._accept_blocker_conflicts():
7563                         self._serialized_tasks_cache = retlist[:]
7564                         self._scheduler_graph = scheduler_graph
7565                         raise self._unknown_internal_error()
7566
7567                 return retlist, scheduler_graph
7568
7569         def _show_circular_deps(self, mygraph):
7570                 # No leaf nodes are available, so we have a circular
7571                 # dependency panic situation.  Reduce the noise level to a
7572                 # minimum via repeated elimination of root nodes since they
7573                 # have no parents and thus can not be part of a cycle.
7574                 while True:
7575                         root_nodes = mygraph.root_nodes(
7576                                 ignore_priority=DepPrioritySatisfiedRange.ignore_medium_soft)
7577                         if not root_nodes:
7578                                 break
7579                         mygraph.difference_update(root_nodes)
7580                 # Display the USE flags that are enabled on nodes that are part
7581                 # of dependency cycles in case that helps the user decide to
7582                 # disable some of them.
7583                 display_order = []
7584                 tempgraph = mygraph.copy()
7585                 while not tempgraph.empty():
7586                         nodes = tempgraph.leaf_nodes()
7587                         if not nodes:
7588                                 node = tempgraph.order[0]
7589                         else:
7590                                 node = nodes[0]
7591                         display_order.append(node)
7592                         tempgraph.remove(node)
7593                 display_order.reverse()
7594                 self.myopts.pop("--quiet", None)
7595                 self.myopts.pop("--verbose", None)
7596                 self.myopts["--tree"] = True
7597                 portage.writemsg("\n\n", noiselevel=-1)
7598                 self.display(display_order)
7599                 prefix = colorize("BAD", " * ")
7600                 portage.writemsg("\n", noiselevel=-1)
7601                 portage.writemsg(prefix + "Error: circular dependencies:\n",
7602                         noiselevel=-1)
7603                 portage.writemsg("\n", noiselevel=-1)
7604                 mygraph.debug_print()
7605                 portage.writemsg("\n", noiselevel=-1)
7606                 portage.writemsg(prefix + "Note that circular dependencies " + \
7607                         "can often be avoided by temporarily\n", noiselevel=-1)
7608                 portage.writemsg(prefix + "disabling USE flags that trigger " + \
7609                         "optional dependencies.\n", noiselevel=-1)
7610
7611         def _show_merge_list(self):
7612                 if self._serialized_tasks_cache is not None and \
7613                         not (self._displayed_list and \
7614                         (self._displayed_list == self._serialized_tasks_cache or \
7615                         self._displayed_list == \
7616                                 list(reversed(self._serialized_tasks_cache)))):
7617                         display_list = self._serialized_tasks_cache[:]
7618                         if "--tree" in self.myopts:
7619                                 display_list.reverse()
7620                         self.display(display_list)
7621
7622         def _show_unsatisfied_blockers(self, blockers):
7623                 self._show_merge_list()
7624                 msg = "Error: The above package list contains " + \
7625                         "packages which cannot be installed " + \
7626                         "at the same time on the same system."
7627                 prefix = colorize("BAD", " * ")
7628                 from textwrap import wrap
7629                 portage.writemsg("\n", noiselevel=-1)
7630                 for line in wrap(msg, 70):
7631                         portage.writemsg(prefix + line + "\n", noiselevel=-1)
7632
7633                 # Display the conflicting packages along with the packages
7634                 # that pulled them in. This is helpful for troubleshooting
7635                 # cases in which blockers don't solve automatically and
7636                 # the reasons are not apparent from the normal merge list
7637                 # display.
7638
7639                 conflict_pkgs = {}
7640                 for blocker in blockers:
7641                         for pkg in chain(self._blocked_pkgs.child_nodes(blocker), \
7642                                 self._blocker_parents.parent_nodes(blocker)):
7643                                 parent_atoms = self._parent_atoms.get(pkg)
7644                                 if not parent_atoms:
7645                                         atom = self._blocked_world_pkgs.get(pkg)
7646                                         if atom is not None:
7647                                                 parent_atoms = set([("@world", atom)])
7648                                 if parent_atoms:
7649                                         conflict_pkgs[pkg] = parent_atoms
7650
7651                 if conflict_pkgs:
7652                         # Reduce noise by pruning packages that are only
7653                         # pulled in by other conflict packages.
7654                         pruned_pkgs = set()
7655                         for pkg, parent_atoms in conflict_pkgs.iteritems():
7656                                 relevant_parent = False
7657                                 for parent, atom in parent_atoms:
7658                                         if parent not in conflict_pkgs:
7659                                                 relevant_parent = True
7660                                                 break
7661                                 if not relevant_parent:
7662                                         pruned_pkgs.add(pkg)
7663                         for pkg in pruned_pkgs:
7664                                 del conflict_pkgs[pkg]
7665
7666                 if conflict_pkgs:
7667                         msg = []
7668                         msg.append("\n")
7669                         indent = "  "
7670                         # Max number of parents shown, to avoid flooding the display.
7671                         max_parents = 3
7672                         for pkg, parent_atoms in conflict_pkgs.iteritems():
7673
7674                                 pruned_list = set()
7675
7676                                 # Prefer packages that are not directly involved in a conflict.
7677                                 for parent_atom in parent_atoms:
7678                                         if len(pruned_list) >= max_parents:
7679                                                 break
7680                                         parent, atom = parent_atom
7681                                         if parent not in conflict_pkgs:
7682                                                 pruned_list.add(parent_atom)
7683
7684                                 for parent_atom in parent_atoms:
7685                                         if len(pruned_list) >= max_parents:
7686                                                 break
7687                                         pruned_list.add(parent_atom)
7688
7689                                 omitted_parents = len(parent_atoms) - len(pruned_list)
7690                                 msg.append(indent + "%s pulled in by\n" % pkg)
7691
7692                                 for parent_atom in pruned_list:
7693                                         parent, atom = parent_atom
7694                                         msg.append(2*indent)
7695                                         if isinstance(parent,
7696                                                 (PackageArg, AtomArg)):
7697                                                 # For PackageArg and AtomArg types, it's
7698                                                 # redundant to display the atom attribute.
7699                                                 msg.append(str(parent))
7700                                         else:
7701                                                 # Display the specific atom from SetArg or
7702                                                 # Package types.
7703                                                 msg.append("%s required by %s" % (atom, parent))
7704                                         msg.append("\n")
7705
7706                                 if omitted_parents:
7707                                         msg.append(2*indent)
7708                                         msg.append("(and %d more)\n" % omitted_parents)
7709
7710                                 msg.append("\n")
7711
7712                         sys.stderr.write("".join(msg))
7713                         sys.stderr.flush()
7714
7715                 if "--quiet" not in self.myopts:
7716                         show_blocker_docs_link()
7717
7718         def display(self, mylist, favorites=[], verbosity=None):
7719
7720                 # This is used to prevent display_problems() from
7721                 # redundantly displaying this exact same merge list
7722                 # again via _show_merge_list().
7723                 self._displayed_list = mylist
7724
7725                 if verbosity is None:
7726                         verbosity = ("--quiet" in self.myopts and 1 or \
7727                                 "--verbose" in self.myopts and 3 or 2)
7728                 favorites_set = InternalPackageSet(favorites)
7729                 oneshot = "--oneshot" in self.myopts or \
7730                         "--onlydeps" in self.myopts
7731                 columns = "--columns" in self.myopts
7732                 changelogs=[]
7733                 p=[]
7734                 blockers = []
7735
7736                 counters = PackageCounters()
7737
7738                 if verbosity == 1 and "--verbose" not in self.myopts:
7739                         def create_use_string(*args):
7740                                 return ""
7741                 else:
7742                         def create_use_string(name, cur_iuse, iuse_forced, cur_use,
7743                                 old_iuse, old_use,
7744                                 is_new, reinst_flags,
7745                                 all_flags=(verbosity == 3 or "--quiet" in self.myopts),
7746                                 alphabetical=("--alphabetical" in self.myopts)):
7747                                 enabled = []
7748                                 if alphabetical:
7749                                         disabled = enabled
7750                                         removed = enabled
7751                                 else:
7752                                         disabled = []
7753                                         removed = []
7754                                 cur_iuse = set(cur_iuse)
7755                                 enabled_flags = cur_iuse.intersection(cur_use)
7756                                 removed_iuse = set(old_iuse).difference(cur_iuse)
7757                                 any_iuse = cur_iuse.union(old_iuse)
7758                                 any_iuse = list(any_iuse)
7759                                 any_iuse.sort()
7760                                 for flag in any_iuse:
7761                                         flag_str = None
7762                                         isEnabled = False
7763                                         reinst_flag = reinst_flags and flag in reinst_flags
7764                                         if flag in enabled_flags:
7765                                                 isEnabled = True
7766                                                 if is_new or flag in old_use and \
7767                                                         (all_flags or reinst_flag):
7768                                                         flag_str = red(flag)
7769                                                 elif flag not in old_iuse:
7770                                                         flag_str = yellow(flag) + "%*"
7771                                                 elif flag not in old_use:
7772                                                         flag_str = green(flag) + "*"
7773                                         elif flag in removed_iuse:
7774                                                 if all_flags or reinst_flag:
7775                                                         flag_str = yellow("-" + flag) + "%"
7776                                                         if flag in old_use:
7777                                                                 flag_str += "*"
7778                                                         flag_str = "(" + flag_str + ")"
7779                                                         removed.append(flag_str)
7780                                                 continue
7781                                         else:
7782                                                 if is_new or flag in old_iuse and \
7783                                                         flag not in old_use and \
7784                                                         (all_flags or reinst_flag):
7785                                                         flag_str = blue("-" + flag)
7786                                                 elif flag not in old_iuse:
7787                                                         flag_str = yellow("-" + flag)
7788                                                         if flag not in iuse_forced:
7789                                                                 flag_str += "%"
7790                                                 elif flag in old_use:
7791                                                         flag_str = green("-" + flag) + "*"
7792                                         if flag_str:
7793                                                 if flag in iuse_forced:
7794                                                         flag_str = "(" + flag_str + ")"
7795                                                 if isEnabled:
7796                                                         enabled.append(flag_str)
7797                                                 else:
7798                                                         disabled.append(flag_str)
7799
7800                                 if alphabetical:
7801                                         ret = " ".join(enabled)
7802                                 else:
7803                                         ret = " ".join(enabled + disabled + removed)
7804                                 if ret:
7805                                         ret = '%s="%s" ' % (name, ret)
7806                                 return ret
7807
7808                 repo_display = RepoDisplay(self.roots)
7809
7810                 tree_nodes = []
7811                 display_list = []
7812                 mygraph = self.digraph.copy()
7813
7814                 # If there are any Uninstall instances, add the corresponding
7815                 # blockers to the digraph (useful for --tree display).
7816
7817                 executed_uninstalls = set(node for node in mylist \
7818                         if isinstance(node, Package) and node.operation == "unmerge")
7819
7820                 for uninstall in self._blocker_uninstalls.leaf_nodes():
7821                         uninstall_parents = \
7822                                 self._blocker_uninstalls.parent_nodes(uninstall)
7823                         if not uninstall_parents:
7824                                 continue
7825
7826                         # Remove the corresponding "nomerge" node and substitute
7827                         # the Uninstall node.
7828                         inst_pkg = self._pkg_cache[
7829                                 ("installed", uninstall.root, uninstall.cpv, "nomerge")]
7830                         try:
7831                                 mygraph.remove(inst_pkg)
7832                         except KeyError:
7833                                 pass
7834
7835                         try:
7836                                 inst_pkg_blockers = self._blocker_parents.child_nodes(inst_pkg)
7837                         except KeyError:
7838                                 inst_pkg_blockers = []
7839
7840                         # Break the Package -> Uninstall edges.
7841                         mygraph.remove(uninstall)
7842
7843                         # Resolution of a package's blockers
7844                         # depend on it's own uninstallation.
7845                         for blocker in inst_pkg_blockers:
7846                                 mygraph.add(uninstall, blocker)
7847
7848                         # Expand Package -> Uninstall edges into
7849                         # Package -> Blocker -> Uninstall edges.
7850                         for blocker in uninstall_parents:
7851                                 mygraph.add(uninstall, blocker)
7852                                 for parent in self._blocker_parents.parent_nodes(blocker):
7853                                         if parent != inst_pkg:
7854                                                 mygraph.add(blocker, parent)
7855
7856                         # If the uninstall task did not need to be executed because
7857                         # of an upgrade, display Blocker -> Upgrade edges since the
7858                         # corresponding Blocker -> Uninstall edges will not be shown.
7859                         upgrade_node = \
7860                                 self._slot_pkg_map[uninstall.root].get(uninstall.slot_atom)
7861                         if upgrade_node is not None and \
7862                                 uninstall not in executed_uninstalls:
7863                                 for blocker in uninstall_parents:
7864                                         mygraph.add(upgrade_node, blocker)
7865
7866                 unsatisfied_blockers = []
7867                 i = 0
7868                 depth = 0
7869                 shown_edges = set()
7870                 for x in mylist:
7871                         if isinstance(x, Blocker) and not x.satisfied:
7872                                 unsatisfied_blockers.append(x)
7873                                 continue
7874                         graph_key = x
7875                         if "--tree" in self.myopts:
7876                                 depth = len(tree_nodes)
7877                                 while depth and graph_key not in \
7878                                         mygraph.child_nodes(tree_nodes[depth-1]):
7879                                                 depth -= 1
7880                                 if depth:
7881                                         tree_nodes = tree_nodes[:depth]
7882                                         tree_nodes.append(graph_key)
7883                                         display_list.append((x, depth, True))
7884                                         shown_edges.add((graph_key, tree_nodes[depth-1]))
7885                                 else:
7886                                         traversed_nodes = set() # prevent endless circles
7887                                         traversed_nodes.add(graph_key)
7888                                         def add_parents(current_node, ordered):
7889                                                 parent_nodes = None
7890                                                 # Do not traverse to parents if this node is an
7891                                                 # an argument or a direct member of a set that has
7892                                                 # been specified as an argument (system or world).
7893                                                 if current_node not in self._set_nodes:
7894                                                         parent_nodes = mygraph.parent_nodes(current_node)
7895                                                 if parent_nodes:
7896                                                         child_nodes = set(mygraph.child_nodes(current_node))
7897                                                         selected_parent = None
7898                                                         # First, try to avoid a direct cycle.
7899                                                         for node in parent_nodes:
7900                                                                 if not isinstance(node, (Blocker, Package)):
7901                                                                         continue
7902                                                                 if node not in traversed_nodes and \
7903                                                                         node not in child_nodes:
7904                                                                         edge = (current_node, node)
7905                                                                         if edge in shown_edges:
7906                                                                                 continue
7907                                                                         selected_parent = node
7908                                                                         break
7909                                                         if not selected_parent:
7910                                                                 # A direct cycle is unavoidable.
7911                                                                 for node in parent_nodes:
7912                                                                         if not isinstance(node, (Blocker, Package)):
7913                                                                                 continue
7914                                                                         if node not in traversed_nodes:
7915                                                                                 edge = (current_node, node)
7916                                                                                 if edge in shown_edges:
7917                                                                                         continue
7918                                                                                 selected_parent = node
7919                                                                                 break
7920                                                         if selected_parent:
7921                                                                 shown_edges.add((current_node, selected_parent))
7922                                                                 traversed_nodes.add(selected_parent)
7923                                                                 add_parents(selected_parent, False)
7924                                                 display_list.append((current_node,
7925                                                         len(tree_nodes), ordered))
7926                                                 tree_nodes.append(current_node)
7927                                         tree_nodes = []
7928                                         add_parents(graph_key, True)
7929                         else:
7930                                 display_list.append((x, depth, True))
7931                 mylist = display_list
7932                 for x in unsatisfied_blockers:
7933                         mylist.append((x, 0, True))
7934
7935                 last_merge_depth = 0
7936                 for i in xrange(len(mylist)-1,-1,-1):
7937                         graph_key, depth, ordered = mylist[i]
7938                         if not ordered and depth == 0 and i > 0 \
7939                                 and graph_key == mylist[i-1][0] and \
7940                                 mylist[i-1][1] == 0:
7941                                 # An ordered node got a consecutive duplicate when the tree was
7942                                 # being filled in.
7943                                 del mylist[i]
7944                                 continue
7945                         if ordered and graph_key[-1] != "nomerge":
7946                                 last_merge_depth = depth
7947                                 continue
7948                         if depth >= last_merge_depth or \
7949                                 i < len(mylist) - 1 and \
7950                                 depth >= mylist[i+1][1]:
7951                                         del mylist[i]
7952
7953                 from portage import flatten
7954                 from portage.dep import use_reduce, paren_reduce
7955                 # files to fetch list - avoids counting a same file twice
7956                 # in size display (verbose mode)
7957                 myfetchlist=[]
7958
7959                 # Use this set to detect when all the "repoadd" strings are "[0]"
7960                 # and disable the entire repo display in this case.
7961                 repoadd_set = set()
7962
7963                 for mylist_index in xrange(len(mylist)):
7964                         x, depth, ordered = mylist[mylist_index]
7965                         pkg_type = x[0]
7966                         myroot = x[1]
7967                         pkg_key = x[2]
7968                         portdb = self.trees[myroot]["porttree"].dbapi
7969                         bindb  = self.trees[myroot]["bintree"].dbapi
7970                         vardb = self.trees[myroot]["vartree"].dbapi
7971                         vartree = self.trees[myroot]["vartree"]
7972                         pkgsettings = self.pkgsettings[myroot]
7973
7974                         fetch=" "
7975                         indent = " " * depth
7976
7977                         if isinstance(x, Blocker):
7978                                 if x.satisfied:
7979                                         blocker_style = "PKG_BLOCKER_SATISFIED"
7980                                         addl = "%s  %s  " % (colorize(blocker_style, "b"), fetch)
7981                                 else:
7982                                         blocker_style = "PKG_BLOCKER"
7983                                         addl = "%s  %s  " % (colorize(blocker_style, "B"), fetch)
7984                                 if ordered:
7985                                         counters.blocks += 1
7986                                         if x.satisfied:
7987                                                 counters.blocks_satisfied += 1
7988                                 resolved = portage.key_expand(
7989                                         str(x.atom).lstrip("!"), mydb=vardb, settings=pkgsettings)
7990                                 if "--columns" in self.myopts and "--quiet" in self.myopts:
7991                                         addl += " " + colorize(blocker_style, resolved)
7992                                 else:
7993                                         addl = "[%s %s] %s%s" % \
7994                                                 (colorize(blocker_style, "blocks"),
7995                                                 addl, indent, colorize(blocker_style, resolved))
7996                                 block_parents = self._blocker_parents.parent_nodes(x)
7997                                 block_parents = set([pnode[2] for pnode in block_parents])
7998                                 block_parents = ", ".join(block_parents)
7999                                 if resolved!=x[2]:
8000                                         addl += colorize(blocker_style,
8001                                                 " (\"%s\" is blocking %s)") % \
8002                                                 (str(x.atom).lstrip("!"), block_parents)
8003                                 else:
8004                                         addl += colorize(blocker_style,
8005                                                 " (is blocking %s)") % block_parents
8006                                 if isinstance(x, Blocker) and x.satisfied:
8007                                         if columns:
8008                                                 continue
8009                                         p.append(addl)
8010                                 else:
8011                                         blockers.append(addl)
8012                         else:
8013                                 pkg_status = x[3]
8014                                 pkg_merge = ordered and pkg_status == "merge"
8015                                 if not pkg_merge and pkg_status == "merge":
8016                                         pkg_status = "nomerge"
8017                                 built = pkg_type != "ebuild"
8018                                 installed = pkg_type == "installed"
8019                                 pkg = x
8020                                 metadata = pkg.metadata
8021                                 ebuild_path = None
8022                                 repo_name = metadata["repository"]
8023                                 if pkg_type == "ebuild":
8024                                         ebuild_path = portdb.findname(pkg_key)
8025                                         if not ebuild_path: # shouldn't happen
8026                                                 raise portage.exception.PackageNotFound(pkg_key)
8027                                         repo_path_real = os.path.dirname(os.path.dirname(
8028                                                 os.path.dirname(ebuild_path)))
8029                                 else:
8030                                         repo_path_real = portdb.getRepositoryPath(repo_name)
8031                                 pkg_use = list(pkg.use.enabled)
8032                                 try:
8033                                         restrict = flatten(use_reduce(paren_reduce(
8034                                                 pkg.metadata["RESTRICT"]), uselist=pkg_use))
8035                                 except portage.exception.InvalidDependString, e:
8036                                         if not pkg.installed:
8037                                                 show_invalid_depstring_notice(x,
8038                                                         pkg.metadata["RESTRICT"], str(e))
8039                                                 del e
8040                                                 return 1
8041                                         restrict = []
8042                                 if "ebuild" == pkg_type and x[3] != "nomerge" and \
8043                                         "fetch" in restrict:
8044                                         fetch = red("F")
8045                                         if ordered:
8046                                                 counters.restrict_fetch += 1
8047                                         if portdb.fetch_check(pkg_key, pkg_use):
8048                                                 fetch = green("f")
8049                                                 if ordered:
8050                                                         counters.restrict_fetch_satisfied += 1
8051
8052                                 #we need to use "--emptrytree" testing here rather than "empty" param testing because "empty"
8053                                 #param is used for -u, where you still *do* want to see when something is being upgraded.
8054                                 myoldbest = []
8055                                 myinslotlist = None
8056                                 installed_versions = vardb.match(portage.cpv_getkey(pkg_key))
8057                                 if vardb.cpv_exists(pkg_key):
8058                                         addl="  "+yellow("R")+fetch+"  "
8059                                         if ordered:
8060                                                 if pkg_merge:
8061                                                         counters.reinst += 1
8062                                                 elif pkg_status == "uninstall":
8063                                                         counters.uninst += 1
8064                                 # filter out old-style virtual matches
8065                                 elif installed_versions and \
8066                                         portage.cpv_getkey(installed_versions[0]) == \
8067                                         portage.cpv_getkey(pkg_key):
8068                                         myinslotlist = vardb.match(pkg.slot_atom)
8069                                         # If this is the first install of a new-style virtual, we
8070                                         # need to filter out old-style virtual matches.
8071                                         if myinslotlist and \
8072                                                 portage.cpv_getkey(myinslotlist[0]) != \
8073                                                 portage.cpv_getkey(pkg_key):
8074                                                 myinslotlist = None
8075                                         if myinslotlist:
8076                                                 myoldbest = myinslotlist[:]
8077                                                 addl = "   " + fetch
8078                                                 if not portage.dep.cpvequal(pkg_key,
8079                                                         portage.best([pkg_key] + myoldbest)):
8080                                                         # Downgrade in slot
8081                                                         addl += turquoise("U")+blue("D")
8082                                                         if ordered:
8083                                                                 counters.downgrades += 1
8084                                                 else:
8085                                                         # Update in slot
8086                                                         addl += turquoise("U") + " "
8087                                                         if ordered:
8088                                                                 counters.upgrades += 1
8089                                         else:
8090                                                 # New slot, mark it new.
8091                                                 addl = " " + green("NS") + fetch + "  "
8092                                                 myoldbest = vardb.match(portage.cpv_getkey(pkg_key))
8093                                                 if ordered:
8094                                                         counters.newslot += 1
8095
8096                                         if "--changelog" in self.myopts:
8097                                                 inst_matches = vardb.match(pkg.slot_atom)
8098                                                 if inst_matches:
8099                                                         changelogs.extend(self.calc_changelog(
8100                                                                 portdb.findname(pkg_key),
8101                                                                 inst_matches[0], pkg_key))
8102                                 else:
8103                                         addl = " " + green("N") + " " + fetch + "  "
8104                                         if ordered:
8105                                                 counters.new += 1
8106
8107                                 verboseadd = ""
8108                                 repoadd = None
8109
8110                                 if True:
8111                                         # USE flag display
8112                                         forced_flags = set()
8113                                         pkgsettings.setcpv(pkg) # for package.use.{mask,force}
8114                                         forced_flags.update(pkgsettings.useforce)
8115                                         forced_flags.update(pkgsettings.usemask)
8116
8117                                         cur_use = [flag for flag in pkg.use.enabled \
8118                                                 if flag in pkg.iuse.all]
8119                                         cur_iuse = sorted(pkg.iuse.all)
8120
8121                                         if myoldbest and myinslotlist:
8122                                                 previous_cpv = myoldbest[0]
8123                                         else:
8124                                                 previous_cpv = pkg.cpv
8125                                         if vardb.cpv_exists(previous_cpv):
8126                                                 old_iuse, old_use = vardb.aux_get(
8127                                                                 previous_cpv, ["IUSE", "USE"])
8128                                                 old_iuse = list(set(
8129                                                         filter_iuse_defaults(old_iuse.split())))
8130                                                 old_iuse.sort()
8131                                                 old_use = old_use.split()
8132                                                 is_new = False
8133                                         else:
8134                                                 old_iuse = []
8135                                                 old_use = []
8136                                                 is_new = True
8137
8138                                         old_use = [flag for flag in old_use if flag in old_iuse]
8139
8140                                         use_expand = pkgsettings["USE_EXPAND"].lower().split()
8141                                         use_expand.sort()
8142                                         use_expand.reverse()
8143                                         use_expand_hidden = \
8144                                                 pkgsettings["USE_EXPAND_HIDDEN"].lower().split()
8145
8146                                         def map_to_use_expand(myvals, forcedFlags=False,
8147                                                 removeHidden=True):
8148                                                 ret = {}
8149                                                 forced = {}
8150                                                 for exp in use_expand:
8151                                                         ret[exp] = []
8152                                                         forced[exp] = set()
8153                                                         for val in myvals[:]:
8154                                                                 if val.startswith(exp.lower()+"_"):
8155                                                                         if val in forced_flags:
8156                                                                                 forced[exp].add(val[len(exp)+1:])
8157                                                                         ret[exp].append(val[len(exp)+1:])
8158                                                                         myvals.remove(val)
8159                                                 ret["USE"] = myvals
8160                                                 forced["USE"] = [val for val in myvals \
8161                                                         if val in forced_flags]
8162                                                 if removeHidden:
8163                                                         for exp in use_expand_hidden:
8164                                                                 ret.pop(exp, None)
8165                                                 if forcedFlags:
8166                                                         return ret, forced
8167                                                 return ret
8168
8169                                         # Prevent USE_EXPAND_HIDDEN flags from being hidden if they
8170                                         # are the only thing that triggered reinstallation.
8171                                         reinst_flags_map = {}
8172                                         reinstall_for_flags = self._reinstall_nodes.get(pkg)
8173                                         reinst_expand_map = None
8174                                         if reinstall_for_flags:
8175                                                 reinst_flags_map = map_to_use_expand(
8176                                                         list(reinstall_for_flags), removeHidden=False)
8177                                                 for k in list(reinst_flags_map):
8178                                                         if not reinst_flags_map[k]:
8179                                                                 del reinst_flags_map[k]
8180                                                 if not reinst_flags_map.get("USE"):
8181                                                         reinst_expand_map = reinst_flags_map.copy()
8182                                                         reinst_expand_map.pop("USE", None)
8183                                         if reinst_expand_map and \
8184                                                 not set(reinst_expand_map).difference(
8185                                                 use_expand_hidden):
8186                                                 use_expand_hidden = \
8187                                                         set(use_expand_hidden).difference(
8188                                                         reinst_expand_map)
8189
8190                                         cur_iuse_map, iuse_forced = \
8191                                                 map_to_use_expand(cur_iuse, forcedFlags=True)
8192                                         cur_use_map = map_to_use_expand(cur_use)
8193                                         old_iuse_map = map_to_use_expand(old_iuse)
8194                                         old_use_map = map_to_use_expand(old_use)
8195
8196                                         use_expand.sort()
8197                                         use_expand.insert(0, "USE")
8198                                         
8199                                         for key in use_expand:
8200                                                 if key in use_expand_hidden:
8201                                                         continue
8202                                                 verboseadd += create_use_string(key.upper(),
8203                                                         cur_iuse_map[key], iuse_forced[key],
8204                                                         cur_use_map[key], old_iuse_map[key],
8205                                                         old_use_map[key], is_new,
8206                                                         reinst_flags_map.get(key))
8207
8208                                 if verbosity == 3:
8209                                         # size verbose
8210                                         mysize=0
8211                                         if pkg_type == "ebuild" and pkg_merge:
8212                                                 try:
8213                                                         myfilesdict = portdb.getfetchsizes(pkg_key,
8214                                                                 useflags=pkg_use, debug=self.edebug)
8215                                                 except portage.exception.InvalidDependString, e:
8216                                                         src_uri = portdb.aux_get(pkg_key, ["SRC_URI"])[0]
8217                                                         show_invalid_depstring_notice(x, src_uri, str(e))
8218                                                         del e
8219                                                         return 1
8220                                                 if myfilesdict is None:
8221                                                         myfilesdict="[empty/missing/bad digest]"
8222                                                 else:
8223                                                         for myfetchfile in myfilesdict:
8224                                                                 if myfetchfile not in myfetchlist:
8225                                                                         mysize+=myfilesdict[myfetchfile]
8226                                                                         myfetchlist.append(myfetchfile)
8227                                                         if ordered:
8228                                                                 counters.totalsize += mysize
8229                                                 verboseadd += format_size(mysize)
8230
8231                                         # overlay verbose
8232                                         # assign index for a previous version in the same slot
8233                                         has_previous = False
8234                                         repo_name_prev = None
8235                                         slot_atom = "%s:%s" % (portage.dep_getkey(pkg_key),
8236                                                 metadata["SLOT"])
8237                                         slot_matches = vardb.match(slot_atom)
8238                                         if slot_matches:
8239                                                 has_previous = True
8240                                                 repo_name_prev = vardb.aux_get(slot_matches[0],
8241                                                         ["repository"])[0]
8242
8243                                         # now use the data to generate output
8244                                         if pkg.installed or not has_previous:
8245                                                 repoadd = repo_display.repoStr(repo_path_real)
8246                                         else:
8247                                                 repo_path_prev = None
8248                                                 if repo_name_prev:
8249                                                         repo_path_prev = portdb.getRepositoryPath(
8250                                                                 repo_name_prev)
8251                                                 if repo_path_prev == repo_path_real:
8252                                                         repoadd = repo_display.repoStr(repo_path_real)
8253                                                 else:
8254                                                         repoadd = "%s=>%s" % (
8255                                                                 repo_display.repoStr(repo_path_prev),
8256                                                                 repo_display.repoStr(repo_path_real))
8257                                         if repoadd:
8258                                                 repoadd_set.add(repoadd)
8259
8260                                 xs = [portage.cpv_getkey(pkg_key)] + \
8261                                         list(portage.catpkgsplit(pkg_key)[2:])
8262                                 if xs[2] == "r0":
8263                                         xs[2] = ""
8264                                 else:
8265                                         xs[2] = "-" + xs[2]
8266
8267                                 mywidth = 130
8268                                 if "COLUMNWIDTH" in self.settings:
8269                                         try:
8270                                                 mywidth = int(self.settings["COLUMNWIDTH"])
8271                                         except ValueError, e:
8272                                                 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
8273                                                 portage.writemsg(
8274                                                         "!!! Unable to parse COLUMNWIDTH='%s'\n" % \
8275                                                         self.settings["COLUMNWIDTH"], noiselevel=-1)
8276                                                 del e
8277                                 oldlp = mywidth - 30
8278                                 newlp = oldlp - 30
8279
8280                                 # Convert myoldbest from a list to a string.
8281                                 if not myoldbest:
8282                                         myoldbest = ""
8283                                 else:
8284                                         for pos, key in enumerate(myoldbest):
8285                                                 key = portage.catpkgsplit(key)[2] + \
8286                                                         "-" + portage.catpkgsplit(key)[3]
8287                                                 if key[-3:] == "-r0":
8288                                                         key = key[:-3]
8289                                                 myoldbest[pos] = key
8290                                         myoldbest = blue("["+", ".join(myoldbest)+"]")
8291
8292                                 pkg_cp = xs[0]
8293                                 root_config = self.roots[myroot]
8294                                 system_set = root_config.sets["system"]
8295                                 world_set  = root_config.sets["world"]
8296
8297                                 pkg_system = False
8298                                 pkg_world = False
8299                                 try:
8300                                         pkg_system = system_set.findAtomForPackage(pkg)
8301                                         pkg_world  = world_set.findAtomForPackage(pkg)
8302                                         if not (oneshot or pkg_world) and \
8303                                                 myroot == self.target_root and \
8304                                                 favorites_set.findAtomForPackage(pkg):
8305                                                 # Maybe it will be added to world now.
8306                                                 if create_world_atom(pkg, favorites_set, root_config):
8307                                                         pkg_world = True
8308                                 except portage.exception.InvalidDependString:
8309                                         # This is reported elsewhere if relevant.
8310                                         pass
8311
8312                                 def pkgprint(pkg_str):
8313                                         if pkg_merge:
8314                                                 if pkg_system:
8315                                                         return colorize("PKG_MERGE_SYSTEM", pkg_str)
8316                                                 elif pkg_world:
8317                                                         return colorize("PKG_MERGE_WORLD", pkg_str)
8318                                                 else:
8319                                                         return colorize("PKG_MERGE", pkg_str)
8320                                         elif pkg_status == "uninstall":
8321                                                 return colorize("PKG_UNINSTALL", pkg_str)
8322                                         else:
8323                                                 if pkg_system:
8324                                                         return colorize("PKG_NOMERGE_SYSTEM", pkg_str)
8325                                                 elif pkg_world:
8326                                                         return colorize("PKG_NOMERGE_WORLD", pkg_str)
8327                                                 else:
8328                                                         return colorize("PKG_NOMERGE", pkg_str)
8329
8330                                 try:
8331                                         properties = flatten(use_reduce(paren_reduce(
8332                                                 pkg.metadata["PROPERTIES"]), uselist=pkg.use.enabled))
8333                                 except portage.exception.InvalidDependString, e:
8334                                         if not pkg.installed:
8335                                                 show_invalid_depstring_notice(pkg,
8336                                                         pkg.metadata["PROPERTIES"], str(e))
8337                                                 del e
8338                                                 return 1
8339                                         properties = []
8340                                 interactive = "interactive" in properties
8341                                 if interactive and pkg.operation == "merge":
8342                                         addl = colorize("WARN", "I") + addl[1:]
8343                                         if ordered:
8344                                                 counters.interactive += 1
8345
8346                                 if x[1]!="/":
8347                                         if myoldbest:
8348                                                 myoldbest +=" "
8349                                         if "--columns" in self.myopts:
8350                                                 if "--quiet" in self.myopts:
8351                                                         myprint=addl+" "+indent+pkgprint(pkg_cp)
8352                                                         myprint=myprint+darkblue(" "+xs[1]+xs[2])+" "
8353                                                         myprint=myprint+myoldbest
8354                                                         myprint=myprint+darkgreen("to "+x[1])
8355                                                         verboseadd = None
8356                                                 else:
8357                                                         if not pkg_merge:
8358                                                                 myprint = "[%s] %s%s" % \
8359                                                                         (pkgprint(pkg_status.ljust(13)),
8360                                                                         indent, pkgprint(pkg.cp))
8361                                                         else:
8362                                                                 myprint = "[%s %s] %s%s" % \
8363                                                                         (pkgprint(pkg.type_name), addl,
8364                                                                         indent, pkgprint(pkg.cp))
8365                                                         if (newlp-nc_len(myprint)) > 0:
8366                                                                 myprint=myprint+(" "*(newlp-nc_len(myprint)))
8367                                                         myprint=myprint+"["+darkblue(xs[1]+xs[2])+"] "
8368                                                         if (oldlp-nc_len(myprint)) > 0:
8369                                                                 myprint=myprint+" "*(oldlp-nc_len(myprint))
8370                                                         myprint=myprint+myoldbest
8371                                                         myprint += darkgreen("to " + pkg.root)
8372                                         else:
8373                                                 if not pkg_merge:
8374                                                         myprint = "[%s] " % pkgprint(pkg_status.ljust(13))
8375                                                 else:
8376                                                         myprint = "[%s %s] " % (pkgprint(pkg_type), addl)
8377                                                 myprint += indent + pkgprint(pkg_key) + " " + \
8378                                                         myoldbest + darkgreen("to " + myroot)
8379                                 else:
8380                                         if "--columns" in self.myopts:
8381                                                 if "--quiet" in self.myopts:
8382                                                         myprint=addl+" "+indent+pkgprint(pkg_cp)
8383                                                         myprint=myprint+" "+green(xs[1]+xs[2])+" "
8384                                                         myprint=myprint+myoldbest
8385                                                         verboseadd = None
8386                                                 else:
8387                                                         if not pkg_merge:
8388                                                                 myprint = "[%s] %s%s" % \
8389                                                                         (pkgprint(pkg_status.ljust(13)),
8390                                                                         indent, pkgprint(pkg.cp))
8391                                                         else:
8392                                                                 myprint = "[%s %s] %s%s" % \
8393                                                                         (pkgprint(pkg.type_name), addl,
8394                                                                         indent, pkgprint(pkg.cp))
8395                                                         if (newlp-nc_len(myprint)) > 0:
8396                                                                 myprint=myprint+(" "*(newlp-nc_len(myprint)))
8397                                                         myprint=myprint+green(" ["+xs[1]+xs[2]+"] ")
8398                                                         if (oldlp-nc_len(myprint)) > 0:
8399                                                                 myprint=myprint+(" "*(oldlp-nc_len(myprint)))
8400                                                         myprint += myoldbest
8401                                         else:
8402                                                 if not pkg_merge:
8403                                                         myprint = "[%s] %s%s %s" % \
8404                                                                 (pkgprint(pkg_status.ljust(13)),
8405                                                                 indent, pkgprint(pkg.cpv),
8406                                                                 myoldbest)
8407                                                 else:
8408                                                         myprint = "[%s %s] %s%s %s" % \
8409                                                                 (pkgprint(pkg_type), addl, indent,
8410                                                                 pkgprint(pkg.cpv), myoldbest)
8411
8412                                 if columns and pkg.operation == "uninstall":
8413                                         continue
8414                                 p.append((myprint, verboseadd, repoadd))
8415
8416                                 if "--tree" not in self.myopts and \
8417                                         "--quiet" not in self.myopts and \
8418                                         not self._opts_no_restart.intersection(self.myopts) and \
8419                                         pkg.root == self._running_root.root and \
8420                                         portage.match_from_list(
8421                                         portage.const.PORTAGE_PACKAGE_ATOM, [pkg]) and \
8422                                         not vardb.cpv_exists(pkg.cpv) and \
8423                                         "--quiet" not in self.myopts:
8424                                                 if mylist_index < len(mylist) - 1:
8425                                                         p.append(colorize("WARN", "*** Portage will stop merging at this point and reload itself,"))
8426                                                         p.append(colorize("WARN", "    then resume the merge."))
8427
8428                 out = sys.stdout
8429                 show_repos = repoadd_set and repoadd_set != set(["0"])
8430
8431                 for x in p:
8432                         if isinstance(x, basestring):
8433                                 out.write("%s\n" % (x,))
8434                                 continue
8435
8436                         myprint, verboseadd, repoadd = x
8437
8438                         if verboseadd:
8439                                 myprint += " " + verboseadd
8440
8441                         if show_repos and repoadd:
8442                                 myprint += " " + teal("[%s]" % repoadd)
8443
8444                         out.write("%s\n" % (myprint,))
8445
8446                 for x in blockers:
8447                         print x
8448
8449                 if verbosity == 3:
8450                         print
8451                         print counters
8452                         if show_repos:
8453                                 sys.stdout.write(str(repo_display))
8454
8455                 if "--changelog" in self.myopts:
8456                         print
8457                         for revision,text in changelogs:
8458                                 print bold('*'+revision)
8459                                 sys.stdout.write(text)
8460
8461                 sys.stdout.flush()
8462                 return os.EX_OK
8463
8464         def display_problems(self):
8465                 """
8466                 Display problems with the dependency graph such as slot collisions.
8467                 This is called internally by display() to show the problems _after_
8468                 the merge list where it is most likely to be seen, but if display()
8469                 is not going to be called then this method should be called explicitly
8470                 to ensure that the user is notified of problems with the graph.
8471
8472                 All output goes to stderr, except for unsatisfied dependencies which
8473                 go to stdout for parsing by programs such as autounmask.
8474                 """
8475
8476                 # Note that show_masked_packages() sends it's output to
8477                 # stdout, and some programs such as autounmask parse the
8478                 # output in cases when emerge bails out. However, when
8479                 # show_masked_packages() is called for installed packages
8480                 # here, the message is a warning that is more appropriate
8481                 # to send to stderr, so temporarily redirect stdout to
8482                 # stderr. TODO: Fix output code so there's a cleaner way
8483                 # to redirect everything to stderr.
8484                 sys.stdout.flush()
8485                 sys.stderr.flush()
8486                 stdout = sys.stdout
8487                 try:
8488                         sys.stdout = sys.stderr
8489                         self._display_problems()
8490                 finally:
8491                         sys.stdout = stdout
8492                         sys.stdout.flush()
8493                         sys.stderr.flush()
8494
8495                 # This goes to stdout for parsing by programs like autounmask.
8496                 for pargs, kwargs in self._unsatisfied_deps_for_display:
8497                         self._show_unsatisfied_dep(*pargs, **kwargs)
8498
8499         def _display_problems(self):
8500                 if self._circular_deps_for_display is not None:
8501                         self._show_circular_deps(
8502                                 self._circular_deps_for_display)
8503
8504                 # The user is only notified of a slot conflict if
8505                 # there are no unresolvable blocker conflicts.
8506                 if self._unsatisfied_blockers_for_display is not None:
8507                         self._show_unsatisfied_blockers(
8508                                 self._unsatisfied_blockers_for_display)
8509                 else:
8510                         self._show_slot_collision_notice()
8511
8512                 # TODO: Add generic support for "set problem" handlers so that
8513                 # the below warnings aren't special cases for world only.
8514
8515                 if self._missing_args:
8516                         world_problems = False
8517                         if "world" in self._sets:
8518                                 # Filter out indirect members of world (from nested sets)
8519                                 # since only direct members of world are desired here.
8520                                 world_set = self.roots[self.target_root].sets["world"]
8521                                 for arg, atom in self._missing_args:
8522                                         if arg.name == "world" and atom in world_set:
8523                                                 world_problems = True
8524                                                 break
8525
8526                         if world_problems:
8527                                 sys.stderr.write("\n!!! Problems have been " + \
8528                                         "detected with your world file\n")
8529                                 sys.stderr.write("!!! Please run " + \
8530                                         green("emaint --check world")+"\n\n")
8531
8532                 if self._missing_args:
8533                         sys.stderr.write("\n" + colorize("BAD", "!!!") + \
8534                                 " Ebuilds for the following packages are either all\n")
8535                         sys.stderr.write(colorize("BAD", "!!!") + \
8536                                 " masked or don't exist:\n")
8537                         sys.stderr.write(" ".join(str(atom) for arg, atom in \
8538                                 self._missing_args) + "\n")
8539
8540                 if self._pprovided_args:
8541                         arg_refs = {}
8542                         for arg, atom in self._pprovided_args:
8543                                 if isinstance(arg, SetArg):
8544                                         parent = arg.name
8545                                         arg_atom = (atom, atom)
8546                                 else:
8547                                         parent = "args"
8548                                         arg_atom = (arg.arg, atom)
8549                                 refs = arg_refs.setdefault(arg_atom, [])
8550                                 if parent not in refs:
8551                                         refs.append(parent)
8552                         msg = []
8553                         msg.append(bad("\nWARNING: "))
8554                         if len(self._pprovided_args) > 1:
8555                                 msg.append("Requested packages will not be " + \
8556                                         "merged because they are listed in\n")
8557                         else:
8558                                 msg.append("A requested package will not be " + \
8559                                         "merged because it is listed in\n")
8560                         msg.append("package.provided:\n\n")
8561                         problems_sets = set()
8562                         for (arg, atom), refs in arg_refs.iteritems():
8563                                 ref_string = ""
8564                                 if refs:
8565                                         problems_sets.update(refs)
8566                                         refs.sort()
8567                                         ref_string = ", ".join(["'%s'" % name for name in refs])
8568                                         ref_string = " pulled in by " + ref_string
8569                                 msg.append("  %s%s\n" % (colorize("INFORM", str(arg)), ref_string))
8570                         msg.append("\n")
8571                         if "world" in problems_sets:
8572                                 msg.append("This problem can be solved in one of the following ways:\n\n")
8573                                 msg.append("  A) Use emaint to clean offending packages from world (if not installed).\n")
8574                                 msg.append("  B) Uninstall offending packages (cleans them from world).\n")
8575                                 msg.append("  C) Remove offending entries from package.provided.\n\n")
8576                                 msg.append("The best course of action depends on the reason that an offending\n")
8577                                 msg.append("package.provided entry exists.\n\n")
8578                         sys.stderr.write("".join(msg))
8579
8580                 masked_packages = []
8581                 for pkg in self._masked_installed:
8582                         root_config = pkg.root_config
8583                         pkgsettings = self.pkgsettings[pkg.root]
8584                         mreasons = get_masking_status(pkg, pkgsettings, root_config)
8585                         masked_packages.append((root_config, pkgsettings,
8586                                 pkg.cpv, pkg.metadata, mreasons))
8587                 if masked_packages:
8588                         sys.stderr.write("\n" + colorize("BAD", "!!!") + \
8589                                 " The following installed packages are masked:\n")
8590                         show_masked_packages(masked_packages)
8591                         show_mask_docs()
8592                         print
8593
8594         def calc_changelog(self,ebuildpath,current,next):
8595                 if ebuildpath == None or not os.path.exists(ebuildpath):
8596                         return []
8597                 current = '-'.join(portage.catpkgsplit(current)[1:])
8598                 if current.endswith('-r0'):
8599                         current = current[:-3]
8600                 next = '-'.join(portage.catpkgsplit(next)[1:])
8601                 if next.endswith('-r0'):
8602                         next = next[:-3]
8603                 changelogpath = os.path.join(os.path.split(ebuildpath)[0],'ChangeLog')
8604                 try:
8605                         changelog = open(changelogpath).read()
8606                 except SystemExit, e:
8607                         raise # Needed else can't exit
8608                 except:
8609                         return []
8610                 divisions = self.find_changelog_tags(changelog)
8611                 #print 'XX from',current,'to',next
8612                 #for div,text in divisions: print 'XX',div
8613                 # skip entries for all revisions above the one we are about to emerge
8614                 for i in range(len(divisions)):
8615                         if divisions[i][0]==next:
8616                                 divisions = divisions[i:]
8617                                 break
8618                 # find out how many entries we are going to display
8619                 for i in range(len(divisions)):
8620                         if divisions[i][0]==current:
8621                                 divisions = divisions[:i]
8622                                 break
8623                 else:
8624                     # couldnt find the current revision in the list. display nothing
8625                         return []
8626                 return divisions
8627
8628         def find_changelog_tags(self,changelog):
8629                 divs = []
8630                 release = None
8631                 while 1:
8632                         match = re.search(r'^\*\ ?([-a-zA-Z0-9_.+]*)(?:\ .*)?\n',changelog,re.M)
8633                         if match is None:
8634                                 if release is not None:
8635                                         divs.append((release,changelog))
8636                                 return divs
8637                         if release is not None:
8638                                 divs.append((release,changelog[:match.start()]))
8639                         changelog = changelog[match.end():]
8640                         release = match.group(1)
8641                         if release.endswith('.ebuild'):
8642                                 release = release[:-7]
8643                         if release.endswith('-r0'):
8644                                 release = release[:-3]
8645
8646         def saveNomergeFavorites(self):
8647                 """Find atoms in favorites that are not in the mergelist and add them
8648                 to the world file if necessary."""
8649                 for x in ("--buildpkgonly", "--fetchonly", "--fetch-all-uri",
8650                         "--oneshot", "--onlydeps", "--pretend"):
8651                         if x in self.myopts:
8652                                 return
8653                 root_config = self.roots[self.target_root]
8654                 world_set = root_config.sets["world"]
8655
8656                 world_locked = False
8657                 if hasattr(world_set, "lock"):
8658                         world_set.lock()
8659                         world_locked = True
8660
8661                 if hasattr(world_set, "load"):
8662                         world_set.load() # maybe it's changed on disk
8663
8664                 args_set = self._sets["args"]
8665                 portdb = self.trees[self.target_root]["porttree"].dbapi
8666                 added_favorites = set()
8667                 for x in self._set_nodes:
8668                         pkg_type, root, pkg_key, pkg_status = x
8669                         if pkg_status != "nomerge":
8670                                 continue
8671
8672                         try:
8673                                 myfavkey = create_world_atom(x, args_set, root_config)
8674                                 if myfavkey:
8675                                         if myfavkey in added_favorites:
8676                                                 continue
8677                                         added_favorites.add(myfavkey)
8678                         except portage.exception.InvalidDependString, e:
8679                                 writemsg("\n\n!!! '%s' has invalid PROVIDE: %s\n" % \
8680                                         (pkg_key, str(e)), noiselevel=-1)
8681                                 writemsg("!!! see '%s'\n\n" % os.path.join(
8682                                         root, portage.VDB_PATH, pkg_key, "PROVIDE"), noiselevel=-1)
8683                                 del e
8684                 all_added = []
8685                 for k in self._sets:
8686                         if k in ("args", "world") or not root_config.sets[k].world_candidate:
8687                                 continue
8688                         s = SETPREFIX + k
8689                         if s in world_set:
8690                                 continue
8691                         all_added.append(SETPREFIX + k)
8692                 all_added.extend(added_favorites)
8693                 all_added.sort()
8694                 for a in all_added:
8695                         print ">>> Recording %s in \"world\" favorites file..." % \
8696                                 colorize("INFORM", str(a))
8697                 if all_added:
8698                         world_set.update(all_added)
8699
8700                 if world_locked:
8701                         world_set.unlock()
8702
8703         def loadResumeCommand(self, resume_data, skip_masked=False):
8704                 """
8705                 Add a resume command to the graph and validate it in the process.  This
8706                 will raise a PackageNotFound exception if a package is not available.
8707                 """
8708
8709                 if not isinstance(resume_data, dict):
8710                         return False
8711
8712                 mergelist = resume_data.get("mergelist")
8713                 if not isinstance(mergelist, list):
8714                         mergelist = []
8715
8716                 fakedb = self.mydbapi
8717                 trees = self.trees
8718                 serialized_tasks = []
8719                 masked_tasks = []
8720                 for x in mergelist:
8721                         if not (isinstance(x, list) and len(x) == 4):
8722                                 continue
8723                         pkg_type, myroot, pkg_key, action = x
8724                         if pkg_type not in self.pkg_tree_map:
8725                                 continue
8726                         if action != "merge":
8727                                 continue
8728                         tree_type = self.pkg_tree_map[pkg_type]
8729                         mydb = trees[myroot][tree_type].dbapi
8730                         db_keys = list(self._trees_orig[myroot][
8731                                 tree_type].dbapi._aux_cache_keys)
8732                         try:
8733                                 metadata = izip(db_keys, mydb.aux_get(pkg_key, db_keys))
8734                         except KeyError:
8735                                 # It does no exist or it is corrupt.
8736                                 if action == "uninstall":
8737                                         continue
8738                                 raise portage.exception.PackageNotFound(pkg_key)
8739                         installed = action == "uninstall"
8740                         built = pkg_type != "ebuild"
8741                         root_config = self.roots[myroot]
8742                         pkg = Package(built=built, cpv=pkg_key,
8743                                 installed=installed, metadata=metadata,
8744                                 operation=action, root_config=root_config,
8745                                 type_name=pkg_type)
8746                         if pkg_type == "ebuild":
8747                                 pkgsettings = self.pkgsettings[myroot]
8748                                 pkgsettings.setcpv(pkg)
8749                                 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
8750                                 pkg.metadata['CHOST'] = pkgsettings.get('CHOST', '')
8751                         self._pkg_cache[pkg] = pkg
8752
8753                         root_config = self.roots[pkg.root]
8754                         if "merge" == pkg.operation and \
8755                                 not visible(root_config.settings, pkg):
8756                                 if skip_masked:
8757                                         masked_tasks.append(Dependency(root=pkg.root, parent=pkg))
8758                                 else:
8759                                         self._unsatisfied_deps_for_display.append(
8760                                                 ((pkg.root, "="+pkg.cpv), {"myparent":None}))
8761
8762                         fakedb[myroot].cpv_inject(pkg)
8763                         serialized_tasks.append(pkg)
8764                         self.spinner.update()
8765
8766                 if self._unsatisfied_deps_for_display:
8767                         return False
8768
8769                 if not serialized_tasks or "--nodeps" in self.myopts:
8770                         self._serialized_tasks_cache = serialized_tasks
8771                         self._scheduler_graph = self.digraph
8772                 else:
8773                         self._select_package = self._select_pkg_from_graph
8774                         self.myparams.add("selective")
8775                         # Always traverse deep dependencies in order to account for
8776                         # potentially unsatisfied dependencies of installed packages.
8777                         # This is necessary for correct --keep-going or --resume operation
8778                         # in case a package from a group of circularly dependent packages
8779                         # fails. In this case, a package which has recently been installed
8780                         # may have an unsatisfied circular dependency (pulled in by
8781                         # PDEPEND, for example). So, even though a package is already
8782                         # installed, it may not have all of it's dependencies satisfied, so
8783                         # it may not be usable. If such a package is in the subgraph of
8784                         # deep depenedencies of a scheduled build, that build needs to
8785                         # be cancelled. In order for this type of situation to be
8786                         # recognized, deep traversal of dependencies is required.
8787                         self.myparams.add("deep")
8788
8789                         favorites = resume_data.get("favorites")
8790                         args_set = self._sets["args"]
8791                         if isinstance(favorites, list):
8792                                 args = self._load_favorites(favorites)
8793                         else:
8794                                 args = []
8795
8796                         for task in serialized_tasks:
8797                                 if isinstance(task, Package) and \
8798                                         task.operation == "merge":
8799                                         if not self._add_pkg(task, None):
8800                                                 return False
8801
8802                         # Packages for argument atoms need to be explicitly
8803                         # added via _add_pkg() so that they are included in the
8804                         # digraph (needed at least for --tree display).
8805                         for arg in args:
8806                                 for atom in arg.set:
8807                                         pkg, existing_node = self._select_package(
8808                                                 arg.root_config.root, atom)
8809                                         if existing_node is None and \
8810                                                 pkg is not None:
8811                                                 if not self._add_pkg(pkg, Dependency(atom=atom,
8812                                                         root=pkg.root, parent=arg)):
8813                                                         return False
8814
8815                         # Allow unsatisfied deps here to avoid showing a masking
8816                         # message for an unsatisfied dep that isn't necessarily
8817                         # masked.
8818                         if not self._create_graph(allow_unsatisfied=True):
8819                                 return False
8820
8821                         unsatisfied_deps = []
8822                         for dep in self._unsatisfied_deps:
8823                                 if not isinstance(dep.parent, Package):
8824                                         continue
8825                                 if dep.parent.operation == "merge":
8826                                         unsatisfied_deps.append(dep)
8827                                         continue
8828
8829                                 # For unsatisfied deps of installed packages, only account for
8830                                 # them if they are in the subgraph of dependencies of a package
8831                                 # which is scheduled to be installed.
8832                                 unsatisfied_install = False
8833                                 traversed = set()
8834                                 dep_stack = self.digraph.parent_nodes(dep.parent)
8835                                 while dep_stack:
8836                                         node = dep_stack.pop()
8837                                         if not isinstance(node, Package):
8838                                                 continue
8839                                         if node.operation == "merge":
8840                                                 unsatisfied_install = True
8841                                                 break
8842                                         if node in traversed:
8843                                                 continue
8844                                         traversed.add(node)
8845                                         dep_stack.extend(self.digraph.parent_nodes(node))
8846
8847                                 if unsatisfied_install:
8848                                         unsatisfied_deps.append(dep)
8849
8850                         if masked_tasks or unsatisfied_deps:
8851                                 # This probably means that a required package
8852                                 # was dropped via --skipfirst. It makes the
8853                                 # resume list invalid, so convert it to a
8854                                 # UnsatisfiedResumeDep exception.
8855                                 raise self.UnsatisfiedResumeDep(self,
8856                                         masked_tasks + unsatisfied_deps)
8857                         self._serialized_tasks_cache = None
8858                         try:
8859                                 self.altlist()
8860                         except self._unknown_internal_error:
8861                                 return False
8862
8863                 return True
8864
8865         def _load_favorites(self, favorites):
8866                 """
8867                 Use a list of favorites to resume state from a
8868                 previous select_files() call. This creates similar
8869                 DependencyArg instances to those that would have
8870                 been created by the original select_files() call.
8871                 This allows Package instances to be matched with
8872                 DependencyArg instances during graph creation.
8873                 """
8874                 root_config = self.roots[self.target_root]
8875                 getSetAtoms = root_config.setconfig.getSetAtoms
8876                 sets = root_config.sets
8877                 args = []
8878                 for x in favorites:
8879                         if not isinstance(x, basestring):
8880                                 continue
8881                         if x in ("system", "world"):
8882                                 x = SETPREFIX + x
8883                         if x.startswith(SETPREFIX):
8884                                 s = x[len(SETPREFIX):]
8885                                 if s not in sets:
8886                                         continue
8887                                 if s in self._sets:
8888                                         continue
8889                                 # Recursively expand sets so that containment tests in
8890                                 # self._get_parent_sets() properly match atoms in nested
8891                                 # sets (like if world contains system).
8892                                 expanded_set = InternalPackageSet(
8893                                         initial_atoms=getSetAtoms(s))
8894                                 self._sets[s] = expanded_set
8895                                 args.append(SetArg(arg=x, set=expanded_set,
8896                                         root_config=root_config))
8897                         else:
8898                                 if not portage.isvalidatom(x):
8899                                         continue
8900                                 args.append(AtomArg(arg=x, atom=x,
8901                                         root_config=root_config))
8902
8903                 self._set_args(args)
8904                 return args
8905
8906         class UnsatisfiedResumeDep(portage.exception.PortageException):
8907                 """
8908                 A dependency of a resume list is not installed. This
8909                 can occur when a required package is dropped from the
8910                 merge list via --skipfirst.
8911                 """
8912                 def __init__(self, depgraph, value):
8913                         portage.exception.PortageException.__init__(self, value)
8914                         self.depgraph = depgraph
8915
8916         class _internal_exception(portage.exception.PortageException):
8917                 def __init__(self, value=""):
8918                         portage.exception.PortageException.__init__(self, value)
8919
8920         class _unknown_internal_error(_internal_exception):
8921                 """
8922                 Used by the depgraph internally to terminate graph creation.
8923                 The specific reason for the failure should have been dumped
8924                 to stderr, unfortunately, the exact reason for the failure
8925                 may not be known.
8926                 """
8927
8928         class _serialize_tasks_retry(_internal_exception):
8929                 """
8930                 This is raised by the _serialize_tasks() method when it needs to
8931                 be called again for some reason. The only case that it's currently
8932                 used for is when neglected dependencies need to be added to the
8933                 graph in order to avoid making a potentially unsafe decision.
8934                 """
8935
8936         class _dep_check_composite_db(portage.dbapi):
8937                 """
8938                 A dbapi-like interface that is optimized for use in dep_check() calls.
8939                 This is built on top of the existing depgraph package selection logic.
8940                 Some packages that have been added to the graph may be masked from this
8941                 view in order to influence the atom preference selection that occurs
8942                 via dep_check().
8943                 """
8944                 def __init__(self, depgraph, root):
8945                         portage.dbapi.__init__(self)
8946                         self._depgraph = depgraph
8947                         self._root = root
8948                         self._match_cache = {}
8949                         self._cpv_pkg_map = {}
8950
8951                 def _clear_cache(self):
8952                         self._match_cache.clear()
8953                         self._cpv_pkg_map.clear()
8954
8955                 def match(self, atom):
8956                         ret = self._match_cache.get(atom)
8957                         if ret is not None:
8958                                 return ret[:]
8959                         orig_atom = atom
8960                         if "/" not in atom:
8961                                 atom = self._dep_expand(atom)
8962                         pkg, existing = self._depgraph._select_package(self._root, atom)
8963                         if not pkg:
8964                                 ret = []
8965                         else:
8966                                 # Return the highest available from select_package() as well as
8967                                 # any matching slots in the graph db.
8968                                 slots = set()
8969                                 slots.add(pkg.metadata["SLOT"])
8970                                 atom_cp = portage.dep_getkey(atom)
8971                                 if pkg.cp.startswith("virtual/"):
8972                                         # For new-style virtual lookahead that occurs inside
8973                                         # dep_check(), examine all slots. This is needed
8974                                         # so that newer slots will not unnecessarily be pulled in
8975                                         # when a satisfying lower slot is already installed. For
8976                                         # example, if virtual/jdk-1.4 is satisfied via kaffe then
8977                                         # there's no need to pull in a newer slot to satisfy a
8978                                         # virtual/jdk dependency.
8979                                         for db, pkg_type, built, installed, db_keys in \
8980                                                 self._depgraph._filtered_trees[self._root]["dbs"]:
8981                                                 for cpv in db.match(atom):
8982                                                         if portage.cpv_getkey(cpv) != pkg.cp:
8983                                                                 continue
8984                                                         slots.add(db.aux_get(cpv, ["SLOT"])[0])
8985                                 ret = []
8986                                 if self._visible(pkg):
8987                                         self._cpv_pkg_map[pkg.cpv] = pkg
8988                                         ret.append(pkg.cpv)
8989                                 slots.remove(pkg.metadata["SLOT"])
8990                                 while slots:
8991                                         slot_atom = "%s:%s" % (atom_cp, slots.pop())
8992                                         pkg, existing = self._depgraph._select_package(
8993                                                 self._root, slot_atom)
8994                                         if not pkg:
8995                                                 continue
8996                                         if not self._visible(pkg):
8997                                                 continue
8998                                         self._cpv_pkg_map[pkg.cpv] = pkg
8999                                         ret.append(pkg.cpv)
9000                                 if ret:
9001                                         self._cpv_sort_ascending(ret)
9002                         self._match_cache[orig_atom] = ret
9003                         return ret[:]
9004
9005                 def _visible(self, pkg):
9006                         if pkg.installed and "selective" not in self._depgraph.myparams:
9007                                 try:
9008                                         arg = self._depgraph._iter_atoms_for_pkg(pkg).next()
9009                                 except (StopIteration, portage.exception.InvalidDependString):
9010                                         arg = None
9011                                 if arg:
9012                                         return False
9013                         if pkg.installed:
9014                                 try:
9015                                         if not visible(
9016                                                 self._depgraph.pkgsettings[pkg.root], pkg):
9017                                                 return False
9018                                 except portage.exception.InvalidDependString:
9019                                         pass
9020                         in_graph = self._depgraph._slot_pkg_map[
9021                                 self._root].get(pkg.slot_atom)
9022                         if in_graph is None:
9023                                 # Mask choices for packages which are not the highest visible
9024                                 # version within their slot (since they usually trigger slot
9025                                 # conflicts).
9026                                 highest_visible, in_graph = self._depgraph._select_package(
9027                                         self._root, pkg.slot_atom)
9028                                 if pkg != highest_visible:
9029                                         return False
9030                         elif in_graph != pkg:
9031                                 # Mask choices for packages that would trigger a slot
9032                                 # conflict with a previously selected package.
9033                                 return False
9034                         return True
9035
9036                 def _dep_expand(self, atom):
9037                         """
9038                         This is only needed for old installed packages that may
9039                         contain atoms that are not fully qualified with a specific
9040                         category. Emulate the cpv_expand() function that's used by
9041                         dbapi.match() in cases like this. If there are multiple
9042                         matches, it's often due to a new-style virtual that has
9043                         been added, so try to filter those out to avoid raising
9044                         a ValueError.
9045                         """
9046                         root_config = self._depgraph.roots[self._root]
9047                         orig_atom = atom
9048                         expanded_atoms = self._depgraph._dep_expand(root_config, atom)
9049                         if len(expanded_atoms) > 1:
9050                                 non_virtual_atoms = []
9051                                 for x in expanded_atoms:
9052                                         if not portage.dep_getkey(x).startswith("virtual/"):
9053                                                 non_virtual_atoms.append(x)
9054                                 if len(non_virtual_atoms) == 1:
9055                                         expanded_atoms = non_virtual_atoms
9056                         if len(expanded_atoms) > 1:
9057                                 # compatible with portage.cpv_expand()
9058                                 raise portage.exception.AmbiguousPackageName(
9059                                         [portage.dep_getkey(x) for x in expanded_atoms])
9060                         if expanded_atoms:
9061                                 atom = expanded_atoms[0]
9062                         else:
9063                                 null_atom = insert_category_into_atom(atom, "null")
9064                                 null_cp = portage.dep_getkey(null_atom)
9065                                 cat, atom_pn = portage.catsplit(null_cp)
9066                                 virts_p = root_config.settings.get_virts_p().get(atom_pn)
9067                                 if virts_p:
9068                                         # Allow the resolver to choose which virtual.
9069                                         atom = insert_category_into_atom(atom, "virtual")
9070                                 else:
9071                                         atom = insert_category_into_atom(atom, "null")
9072                         return atom
9073
9074                 def aux_get(self, cpv, wants):
9075                         metadata = self._cpv_pkg_map[cpv].metadata
9076                         return [metadata.get(x, "") for x in wants]
9077
9078 class RepoDisplay(object):
9079         def __init__(self, roots):
9080                 self._shown_repos = {}
9081                 self._unknown_repo = False
9082                 repo_paths = set()
9083                 for root_config in roots.itervalues():
9084                         portdir = root_config.settings.get("PORTDIR")
9085                         if portdir:
9086                                 repo_paths.add(portdir)
9087                         overlays = root_config.settings.get("PORTDIR_OVERLAY")
9088                         if overlays:
9089                                 repo_paths.update(overlays.split())
9090                 repo_paths = list(repo_paths)
9091                 self._repo_paths = repo_paths
9092                 self._repo_paths_real = [ os.path.realpath(repo_path) \
9093                         for repo_path in repo_paths ]
9094
9095                 # pre-allocate index for PORTDIR so that it always has index 0.
9096                 for root_config in roots.itervalues():
9097                         portdb = root_config.trees["porttree"].dbapi
9098                         portdir = portdb.porttree_root
9099                         if portdir:
9100                                 self.repoStr(portdir)
9101
9102         def repoStr(self, repo_path_real):
9103                 real_index = -1
9104                 if repo_path_real:
9105                         real_index = self._repo_paths_real.index(repo_path_real)
9106                 if real_index == -1:
9107                         s = "?"
9108                         self._unknown_repo = True
9109                 else:
9110                         shown_repos = self._shown_repos
9111                         repo_paths = self._repo_paths
9112                         repo_path = repo_paths[real_index]
9113                         index = shown_repos.get(repo_path)
9114                         if index is None:
9115                                 index = len(shown_repos)
9116                                 shown_repos[repo_path] = index
9117                         s = str(index)
9118                 return s
9119
9120         def __str__(self):
9121                 output = []
9122                 shown_repos = self._shown_repos
9123                 unknown_repo = self._unknown_repo
9124                 if shown_repos or self._unknown_repo:
9125                         output.append("Portage tree and overlays:\n")
9126                 show_repo_paths = list(shown_repos)
9127                 for repo_path, repo_index in shown_repos.iteritems():
9128                         show_repo_paths[repo_index] = repo_path
9129                 if show_repo_paths:
9130                         for index, repo_path in enumerate(show_repo_paths):
9131                                 output.append(" "+teal("["+str(index)+"]")+" %s\n" % repo_path)
9132                 if unknown_repo:
9133                         output.append(" "+teal("[?]") + \
9134                                 " indicates that the source repository could not be determined\n")
9135                 return "".join(output)
9136
9137 class PackageCounters(object):
9138
9139         def __init__(self):
9140                 self.upgrades   = 0
9141                 self.downgrades = 0
9142                 self.new        = 0
9143                 self.newslot    = 0
9144                 self.reinst     = 0
9145                 self.uninst     = 0
9146                 self.blocks     = 0
9147                 self.blocks_satisfied         = 0
9148                 self.totalsize  = 0
9149                 self.restrict_fetch           = 0
9150                 self.restrict_fetch_satisfied = 0
9151                 self.interactive              = 0
9152
9153         def __str__(self):
9154                 total_installs = self.upgrades + self.downgrades + self.newslot + self.new + self.reinst
9155                 myoutput = []
9156                 details = []
9157                 myoutput.append("Total: %s package" % total_installs)
9158                 if total_installs != 1:
9159                         myoutput.append("s")
9160                 if total_installs != 0:
9161                         myoutput.append(" (")
9162                 if self.upgrades > 0:
9163                         details.append("%s upgrade" % self.upgrades)
9164                         if self.upgrades > 1:
9165                                 details[-1] += "s"
9166                 if self.downgrades > 0:
9167                         details.append("%s downgrade" % self.downgrades)
9168                         if self.downgrades > 1:
9169                                 details[-1] += "s"
9170                 if self.new > 0:
9171                         details.append("%s new" % self.new)
9172                 if self.newslot > 0:
9173                         details.append("%s in new slot" % self.newslot)
9174                         if self.newslot > 1:
9175                                 details[-1] += "s"
9176                 if self.reinst > 0:
9177                         details.append("%s reinstall" % self.reinst)
9178                         if self.reinst > 1:
9179                                 details[-1] += "s"
9180                 if self.uninst > 0:
9181                         details.append("%s uninstall" % self.uninst)
9182                         if self.uninst > 1:
9183                                 details[-1] += "s"
9184                 if self.interactive > 0:
9185                         details.append("%s %s" % (self.interactive,
9186                                 colorize("WARN", "interactive")))
9187                 myoutput.append(", ".join(details))
9188                 if total_installs != 0:
9189                         myoutput.append(")")
9190                 myoutput.append(", Size of downloads: %s" % format_size(self.totalsize))
9191                 if self.restrict_fetch:
9192                         myoutput.append("\nFetch Restriction: %s package" % \
9193                                 self.restrict_fetch)
9194                         if self.restrict_fetch > 1:
9195                                 myoutput.append("s")
9196                 if self.restrict_fetch_satisfied < self.restrict_fetch:
9197                         myoutput.append(bad(" (%s unsatisfied)") % \
9198                                 (self.restrict_fetch - self.restrict_fetch_satisfied))
9199                 if self.blocks > 0:
9200                         myoutput.append("\nConflict: %s block" % \
9201                                 self.blocks)
9202                         if self.blocks > 1:
9203                                 myoutput.append("s")
9204                         if self.blocks_satisfied < self.blocks:
9205                                 myoutput.append(bad(" (%s unsatisfied)") % \
9206                                         (self.blocks - self.blocks_satisfied))
9207                 return "".join(myoutput)
9208
9209 class PollSelectAdapter(PollConstants):
9210
9211         """
9212         Use select to emulate a poll object, for
9213         systems that don't support poll().
9214         """
9215
9216         def __init__(self):
9217                 self._registered = {}
9218                 self._select_args = [[], [], []]
9219
9220         def register(self, fd, *args):
9221                 """
9222                 Only POLLIN is currently supported!
9223                 """
9224                 if len(args) > 1:
9225                         raise TypeError(
9226                                 "register expected at most 2 arguments, got " + \
9227                                 repr(1 + len(args)))
9228
9229                 eventmask = PollConstants.POLLIN | \
9230                         PollConstants.POLLPRI | PollConstants.POLLOUT
9231                 if args:
9232                         eventmask = args[0]
9233
9234                 self._registered[fd] = eventmask
9235                 self._select_args = None
9236
9237         def unregister(self, fd):
9238                 self._select_args = None
9239                 del self._registered[fd]
9240
9241         def poll(self, *args):
9242                 if len(args) > 1:
9243                         raise TypeError(
9244                                 "poll expected at most 2 arguments, got " + \
9245                                 repr(1 + len(args)))
9246
9247                 timeout = None
9248                 if args:
9249                         timeout = args[0]
9250
9251                 select_args = self._select_args
9252                 if select_args is None:
9253                         select_args = [self._registered.keys(), [], []]
9254
9255                 if timeout is not None:
9256                         select_args = select_args[:]
9257                         # Translate poll() timeout args to select() timeout args:
9258                         #
9259                         #          | units        | value(s) for indefinite block
9260                         # ---------|--------------|------------------------------
9261                         #   poll   | milliseconds | omitted, negative, or None
9262                         # ---------|--------------|------------------------------
9263                         #   select | seconds      | omitted
9264                         # ---------|--------------|------------------------------
9265
9266                         if timeout is not None and timeout < 0:
9267                                 timeout = None
9268                         if timeout is not None:
9269                                 select_args.append(timeout / 1000)
9270
9271                 select_events = select.select(*select_args)
9272                 poll_events = []
9273                 for fd in select_events[0]:
9274                         poll_events.append((fd, PollConstants.POLLIN))
9275                 return poll_events
9276
9277 class SequentialTaskQueue(SlotObject):
9278
9279         __slots__ = ("max_jobs", "running_tasks") + \
9280                 ("_dirty", "_scheduling", "_task_queue")
9281
9282         def __init__(self, **kwargs):
9283                 SlotObject.__init__(self, **kwargs)
9284                 self._task_queue = deque()
9285                 self.running_tasks = set()
9286                 if self.max_jobs is None:
9287                         self.max_jobs = 1
9288                 self._dirty = True
9289
9290         def add(self, task):
9291                 self._task_queue.append(task)
9292                 self._dirty = True
9293
9294         def addFront(self, task):
9295                 self._task_queue.appendleft(task)
9296                 self._dirty = True
9297
9298         def schedule(self):
9299
9300                 if not self._dirty:
9301                         return False
9302
9303                 if not self:
9304                         return False
9305
9306                 if self._scheduling:
9307                         # Ignore any recursive schedule() calls triggered via
9308                         # self._task_exit().
9309                         return False
9310
9311                 self._scheduling = True
9312
9313                 task_queue = self._task_queue
9314                 running_tasks = self.running_tasks
9315                 max_jobs = self.max_jobs
9316                 state_changed = False
9317
9318                 while task_queue and \
9319                         (max_jobs is True or len(running_tasks) < max_jobs):
9320                         task = task_queue.popleft()
9321                         cancelled = getattr(task, "cancelled", None)
9322                         if not cancelled:
9323                                 running_tasks.add(task)
9324                                 task.addExitListener(self._task_exit)
9325                                 task.start()
9326                         state_changed = True
9327
9328                 self._dirty = False
9329                 self._scheduling = False
9330
9331                 return state_changed
9332
9333         def _task_exit(self, task):
9334                 """
9335                 Since we can always rely on exit listeners being called, the set of
9336                 running tasks is always pruned automatically and there is never any need
9337                 to actively prune it.
9338                 """
9339                 self.running_tasks.remove(task)
9340                 if self._task_queue:
9341                         self._dirty = True
9342
9343         def clear(self):
9344                 self._task_queue.clear()
9345                 running_tasks = self.running_tasks
9346                 while running_tasks:
9347                         task = running_tasks.pop()
9348                         task.removeExitListener(self._task_exit)
9349                         task.cancel()
9350                 self._dirty = False
9351
9352         def __nonzero__(self):
9353                 return bool(self._task_queue or self.running_tasks)
9354
9355         def __len__(self):
9356                 return len(self._task_queue) + len(self.running_tasks)
9357
9358 _can_poll_device = None
9359
9360 def can_poll_device():
9361         """
9362         Test if it's possible to use poll() on a device such as a pty. This
9363         is known to fail on Darwin.
9364         @rtype: bool
9365         @returns: True if poll() on a device succeeds, False otherwise.
9366         """
9367
9368         global _can_poll_device
9369         if _can_poll_device is not None:
9370                 return _can_poll_device
9371
9372         if not hasattr(select, "poll"):
9373                 _can_poll_device = False
9374                 return _can_poll_device
9375
9376         try:
9377                 dev_null = open('/dev/null', 'rb')
9378         except IOError:
9379                 _can_poll_device = False
9380                 return _can_poll_device
9381
9382         p = select.poll()
9383         p.register(dev_null.fileno(), PollConstants.POLLIN)
9384
9385         invalid_request = False
9386         for f, event in p.poll():
9387                 if event & PollConstants.POLLNVAL:
9388                         invalid_request = True
9389                         break
9390         dev_null.close()
9391
9392         _can_poll_device = not invalid_request
9393         return _can_poll_device
9394
9395 def create_poll_instance():
9396         """
9397         Create an instance of select.poll, or an instance of
9398         PollSelectAdapter there is no poll() implementation or
9399         it is broken somehow.
9400         """
9401         if can_poll_device():
9402                 return select.poll()
9403         return PollSelectAdapter()
9404
9405 getloadavg = getattr(os, "getloadavg", None)
9406 if getloadavg is None:
9407         def getloadavg():
9408                 """
9409                 Uses /proc/loadavg to emulate os.getloadavg().
9410                 Raises OSError if the load average was unobtainable.
9411                 """
9412                 try:
9413                         loadavg_str = open('/proc/loadavg').readline()
9414                 except IOError:
9415                         # getloadavg() is only supposed to raise OSError, so convert
9416                         raise OSError('unknown')
9417                 loadavg_split = loadavg_str.split()
9418                 if len(loadavg_split) < 3:
9419                         raise OSError('unknown')
9420                 loadavg_floats = []
9421                 for i in xrange(3):
9422                         try:
9423                                 loadavg_floats.append(float(loadavg_split[i]))
9424                         except ValueError:
9425                                 raise OSError('unknown')
9426                 return tuple(loadavg_floats)
9427
9428 class PollScheduler(object):
9429
9430         class _sched_iface_class(SlotObject):
9431                 __slots__ = ("register", "schedule", "unregister")
9432
9433         def __init__(self):
9434                 self._max_jobs = 1
9435                 self._max_load = None
9436                 self._jobs = 0
9437                 self._poll_event_queue = []
9438                 self._poll_event_handlers = {}
9439                 self._poll_event_handler_ids = {}
9440                 # Increment id for each new handler.
9441                 self._event_handler_id = 0
9442                 self._poll_obj = create_poll_instance()
9443                 self._scheduling = False
9444
9445         def _schedule(self):
9446                 """
9447                 Calls _schedule_tasks() and automatically returns early from
9448                 any recursive calls to this method that the _schedule_tasks()
9449                 call might trigger. This makes _schedule() safe to call from
9450                 inside exit listeners.
9451                 """
9452                 if self._scheduling:
9453                         return False
9454                 self._scheduling = True
9455                 try:
9456                         return self._schedule_tasks()
9457                 finally:
9458                         self._scheduling = False
9459
9460         def _running_job_count(self):
9461                 return self._jobs
9462
9463         def _can_add_job(self):
9464                 max_jobs = self._max_jobs
9465                 max_load = self._max_load
9466
9467                 if self._max_jobs is not True and \
9468                         self._running_job_count() >= self._max_jobs:
9469                         return False
9470
9471                 if max_load is not None and \
9472                         (max_jobs is True or max_jobs > 1) and \
9473                         self._running_job_count() >= 1:
9474                         try:
9475                                 avg1, avg5, avg15 = getloadavg()
9476                         except OSError:
9477                                 return False
9478
9479                         if avg1 >= max_load:
9480                                 return False
9481
9482                 return True
9483
9484         def _poll(self, timeout=None):
9485                 """
9486                 All poll() calls pass through here. The poll events
9487                 are added directly to self._poll_event_queue.
9488                 In order to avoid endless blocking, this raises
9489                 StopIteration if timeout is None and there are
9490                 no file descriptors to poll.
9491                 """
9492                 if not self._poll_event_handlers:
9493                         self._schedule()
9494                         if timeout is None and \
9495                                 not self._poll_event_handlers:
9496                                 raise StopIteration(
9497                                         "timeout is None and there are no poll() event handlers")
9498
9499                 # The following error is known to occur with Linux kernel versions
9500                 # less than 2.6.24:
9501                 #
9502                 #   select.error: (4, 'Interrupted system call')
9503                 #
9504                 # This error has been observed after a SIGSTOP, followed by SIGCONT.
9505                 # Treat it similar to EAGAIN if timeout is None, otherwise just return
9506                 # without any events.
9507                 while True:
9508                         try:
9509                                 self._poll_event_queue.extend(self._poll_obj.poll(timeout))
9510                                 break
9511                         except select.error, e:
9512                                 writemsg_level("\n!!! select error: %s\n" % (e,),
9513                                         level=logging.ERROR, noiselevel=-1)
9514                                 del e
9515                                 if timeout is not None:
9516                                         break
9517
9518         def _next_poll_event(self, timeout=None):
9519                 """
9520                 Since the _schedule_wait() loop is called by event
9521                 handlers from _poll_loop(), maintain a central event
9522                 queue for both of them to share events from a single
9523                 poll() call. In order to avoid endless blocking, this
9524                 raises StopIteration if timeout is None and there are
9525                 no file descriptors to poll.
9526                 """
9527                 if not self._poll_event_queue:
9528                         self._poll(timeout)
9529                 return self._poll_event_queue.pop()
9530
9531         def _poll_loop(self):
9532
9533                 event_handlers = self._poll_event_handlers
9534                 event_handled = False
9535
9536                 try:
9537                         while event_handlers:
9538                                 f, event = self._next_poll_event()
9539                                 handler, reg_id = event_handlers[f]
9540                                 handler(f, event)
9541                                 event_handled = True
9542                 except StopIteration:
9543                         event_handled = True
9544
9545                 if not event_handled:
9546                         raise AssertionError("tight loop")
9547
9548         def _schedule_yield(self):
9549                 """
9550                 Schedule for a short period of time chosen by the scheduler based
9551                 on internal state. Synchronous tasks should call this periodically
9552                 in order to allow the scheduler to service pending poll events. The
9553                 scheduler will call poll() exactly once, without blocking, and any
9554                 resulting poll events will be serviced.
9555                 """
9556                 event_handlers = self._poll_event_handlers
9557                 events_handled = 0
9558
9559                 if not event_handlers:
9560                         return bool(events_handled)
9561
9562                 if not self._poll_event_queue:
9563                         self._poll(0)
9564
9565                 try:
9566                         while event_handlers and self._poll_event_queue:
9567                                 f, event = self._next_poll_event()
9568                                 handler, reg_id = event_handlers[f]
9569                                 handler(f, event)
9570                                 events_handled += 1
9571                 except StopIteration:
9572                         events_handled += 1
9573
9574                 return bool(events_handled)
9575
9576         def _register(self, f, eventmask, handler):
9577                 """
9578                 @rtype: Integer
9579                 @return: A unique registration id, for use in schedule() or
9580                         unregister() calls.
9581                 """
9582                 if f in self._poll_event_handlers:
9583                         raise AssertionError("fd %d is already registered" % f)
9584                 self._event_handler_id += 1
9585                 reg_id = self._event_handler_id
9586                 self._poll_event_handler_ids[reg_id] = f
9587                 self._poll_event_handlers[f] = (handler, reg_id)
9588                 self._poll_obj.register(f, eventmask)
9589                 return reg_id
9590
9591         def _unregister(self, reg_id):
9592                 f = self._poll_event_handler_ids[reg_id]
9593                 self._poll_obj.unregister(f)
9594                 del self._poll_event_handlers[f]
9595                 del self._poll_event_handler_ids[reg_id]
9596
9597         def _schedule_wait(self, wait_ids):
9598                 """
9599                 Schedule until wait_id is not longer registered
9600                 for poll() events.
9601                 @type wait_id: int
9602                 @param wait_id: a task id to wait for
9603                 """
9604                 event_handlers = self._poll_event_handlers
9605                 handler_ids = self._poll_event_handler_ids
9606                 event_handled = False
9607
9608                 if isinstance(wait_ids, int):
9609                         wait_ids = frozenset([wait_ids])
9610
9611                 try:
9612                         while wait_ids.intersection(handler_ids):
9613                                 f, event = self._next_poll_event()
9614                                 handler, reg_id = event_handlers[f]
9615                                 handler(f, event)
9616                                 event_handled = True
9617                 except StopIteration:
9618                         event_handled = True
9619
9620                 return event_handled
9621
9622 class QueueScheduler(PollScheduler):
9623
9624         """
9625         Add instances of SequentialTaskQueue and then call run(). The
9626         run() method returns when no tasks remain.
9627         """
9628
9629         def __init__(self, max_jobs=None, max_load=None):
9630                 PollScheduler.__init__(self)
9631
9632                 if max_jobs is None:
9633                         max_jobs = 1
9634
9635                 self._max_jobs = max_jobs
9636                 self._max_load = max_load
9637                 self.sched_iface = self._sched_iface_class(
9638                         register=self._register,
9639                         schedule=self._schedule_wait,
9640                         unregister=self._unregister)
9641
9642                 self._queues = []
9643                 self._schedule_listeners = []
9644
9645         def add(self, q):
9646                 self._queues.append(q)
9647
9648         def remove(self, q):
9649                 self._queues.remove(q)
9650
9651         def run(self):
9652
9653                 while self._schedule():
9654                         self._poll_loop()
9655
9656                 while self._running_job_count():
9657                         self._poll_loop()
9658
9659         def _schedule_tasks(self):
9660                 """
9661                 @rtype: bool
9662                 @returns: True if there may be remaining tasks to schedule,
9663                         False otherwise.
9664                 """
9665                 while self._can_add_job():
9666                         n = self._max_jobs - self._running_job_count()
9667                         if n < 1:
9668                                 break
9669
9670                         if not self._start_next_job(n):
9671                                 return False
9672
9673                 for q in self._queues:
9674                         if q:
9675                                 return True
9676                 return False
9677
9678         def _running_job_count(self):
9679                 job_count = 0
9680                 for q in self._queues:
9681                         job_count += len(q.running_tasks)
9682                 self._jobs = job_count
9683                 return job_count
9684
9685         def _start_next_job(self, n=1):
9686                 started_count = 0
9687                 for q in self._queues:
9688                         initial_job_count = len(q.running_tasks)
9689                         q.schedule()
9690                         final_job_count = len(q.running_tasks)
9691                         if final_job_count > initial_job_count:
9692                                 started_count += (final_job_count - initial_job_count)
9693                         if started_count >= n:
9694                                 break
9695                 return started_count
9696
9697 class TaskScheduler(object):
9698
9699         """
9700         A simple way to handle scheduling of AsynchrousTask instances. Simply
9701         add tasks and call run(). The run() method returns when no tasks remain.
9702         """
9703
9704         def __init__(self, max_jobs=None, max_load=None):
9705                 self._queue = SequentialTaskQueue(max_jobs=max_jobs)
9706                 self._scheduler = QueueScheduler(
9707                         max_jobs=max_jobs, max_load=max_load)
9708                 self.sched_iface = self._scheduler.sched_iface
9709                 self.run = self._scheduler.run
9710                 self._scheduler.add(self._queue)
9711
9712         def add(self, task):
9713                 self._queue.add(task)
9714
9715 class JobStatusDisplay(object):
9716
9717         _bound_properties = ("curval", "failed", "running")
9718         _jobs_column_width = 48
9719
9720         # Don't update the display unless at least this much
9721         # time has passed, in units of seconds.
9722         _min_display_latency = 2
9723
9724         _default_term_codes = {
9725                 'cr'  : '\r',
9726                 'el'  : '\x1b[K',
9727                 'nel' : '\n',
9728         }
9729
9730         _termcap_name_map = {
9731                 'carriage_return' : 'cr',
9732                 'clr_eol'         : 'el',
9733                 'newline'         : 'nel',
9734         }
9735
9736         def __init__(self, out=sys.stdout, quiet=False):
9737                 object.__setattr__(self, "out", out)
9738                 object.__setattr__(self, "quiet", quiet)
9739                 object.__setattr__(self, "maxval", 0)
9740                 object.__setattr__(self, "merges", 0)
9741                 object.__setattr__(self, "_changed", False)
9742                 object.__setattr__(self, "_displayed", False)
9743                 object.__setattr__(self, "_last_display_time", 0)
9744                 object.__setattr__(self, "width", 80)
9745                 self.reset()
9746
9747                 isatty = hasattr(out, "isatty") and out.isatty()
9748                 object.__setattr__(self, "_isatty", isatty)
9749                 if not isatty or not self._init_term():
9750                         term_codes = {}
9751                         for k, capname in self._termcap_name_map.iteritems():
9752                                 term_codes[k] = self._default_term_codes[capname]
9753                         object.__setattr__(self, "_term_codes", term_codes)
9754                 encoding = sys.getdefaultencoding()
9755                 for k, v in self._term_codes.items():
9756                         if not isinstance(v, basestring):
9757                                 self._term_codes[k] = v.decode(encoding, 'replace')
9758
9759         def _init_term(self):
9760                 """
9761                 Initialize term control codes.
9762                 @rtype: bool
9763                 @returns: True if term codes were successfully initialized,
9764                         False otherwise.
9765                 """
9766
9767                 term_type = os.environ.get("TERM", "vt100")
9768                 tigetstr = None
9769
9770                 try:
9771                         import curses
9772                         try:
9773                                 curses.setupterm(term_type, self.out.fileno())
9774                                 tigetstr = curses.tigetstr
9775                         except curses.error:
9776                                 pass
9777                 except ImportError:
9778                         pass
9779
9780                 if tigetstr is None:
9781                         return False
9782
9783                 term_codes = {}
9784                 for k, capname in self._termcap_name_map.iteritems():
9785                         code = tigetstr(capname)
9786                         if code is None:
9787                                 code = self._default_term_codes[capname]
9788                         term_codes[k] = code
9789                 object.__setattr__(self, "_term_codes", term_codes)
9790                 return True
9791
9792         def _format_msg(self, msg):
9793                 return ">>> %s" % msg
9794
9795         def _erase(self):
9796                 self.out.write(
9797                         self._term_codes['carriage_return'] + \
9798                         self._term_codes['clr_eol'])
9799                 self.out.flush()
9800                 self._displayed = False
9801
9802         def _display(self, line):
9803                 self.out.write(line)
9804                 self.out.flush()
9805                 self._displayed = True
9806
9807         def _update(self, msg):
9808
9809                 out = self.out
9810                 if not self._isatty:
9811                         out.write(self._format_msg(msg) + self._term_codes['newline'])
9812                         self.out.flush()
9813                         self._displayed = True
9814                         return
9815
9816                 if self._displayed:
9817                         self._erase()
9818
9819                 self._display(self._format_msg(msg))
9820
9821         def displayMessage(self, msg):
9822
9823                 was_displayed = self._displayed
9824
9825                 if self._isatty and self._displayed:
9826                         self._erase()
9827
9828                 self.out.write(self._format_msg(msg) + self._term_codes['newline'])
9829                 self.out.flush()
9830                 self._displayed = False
9831
9832                 if was_displayed:
9833                         self._changed = True
9834                         self.display()
9835
9836         def reset(self):
9837                 self.maxval = 0
9838                 self.merges = 0
9839                 for name in self._bound_properties:
9840                         object.__setattr__(self, name, 0)
9841
9842                 if self._displayed:
9843                         self.out.write(self._term_codes['newline'])
9844                         self.out.flush()
9845                         self._displayed = False
9846
9847         def __setattr__(self, name, value):
9848                 old_value = getattr(self, name)
9849                 if value == old_value:
9850                         return
9851                 object.__setattr__(self, name, value)
9852                 if name in self._bound_properties:
9853                         self._property_change(name, old_value, value)
9854
9855         def _property_change(self, name, old_value, new_value):
9856                 self._changed = True
9857                 self.display()
9858
9859         def _load_avg_str(self):
9860                 try:
9861                         avg = getloadavg()
9862                 except OSError:
9863                         return 'unknown'
9864
9865                 max_avg = max(avg)
9866
9867                 if max_avg < 10:
9868                         digits = 2
9869                 elif max_avg < 100:
9870                         digits = 1
9871                 else:
9872                         digits = 0
9873
9874                 return ", ".join(("%%.%df" % digits ) % x for x in avg)
9875
9876         def display(self):
9877                 """
9878                 Display status on stdout, but only if something has
9879                 changed since the last call.
9880                 """
9881
9882                 if self.quiet:
9883                         return
9884
9885                 current_time = time.time()
9886                 time_delta = current_time - self._last_display_time
9887                 if self._displayed and \
9888                         not self._changed:
9889                         if not self._isatty:
9890                                 return
9891                         if time_delta < self._min_display_latency:
9892                                 return
9893
9894                 self._last_display_time = current_time
9895                 self._changed = False
9896                 self._display_status()
9897
9898         def _display_status(self):
9899                 # Don't use len(self._completed_tasks) here since that also
9900                 # can include uninstall tasks.
9901                 curval_str = str(self.curval)
9902                 maxval_str = str(self.maxval)
9903                 running_str = str(self.running)
9904                 failed_str = str(self.failed)
9905                 load_avg_str = self._load_avg_str()
9906
9907                 color_output = StringIO()
9908                 plain_output = StringIO()
9909                 style_file = portage.output.ConsoleStyleFile(color_output)
9910                 style_file.write_listener = plain_output
9911                 style_writer = portage.output.StyleWriter(file=style_file, maxcol=9999)
9912                 style_writer.style_listener = style_file.new_styles
9913                 f = formatter.AbstractFormatter(style_writer)
9914
9915                 number_style = "INFORM"
9916                 f.add_literal_data("Jobs: ")
9917                 f.push_style(number_style)
9918                 f.add_literal_data(curval_str)
9919                 f.pop_style()
9920                 f.add_literal_data(" of ")
9921                 f.push_style(number_style)
9922                 f.add_literal_data(maxval_str)
9923                 f.pop_style()
9924                 f.add_literal_data(" complete")
9925
9926                 if self.running:
9927                         f.add_literal_data(", ")
9928                         f.push_style(number_style)
9929                         f.add_literal_data(running_str)
9930                         f.pop_style()
9931                         f.add_literal_data(" running")
9932
9933                 if self.failed:
9934                         f.add_literal_data(", ")
9935                         f.push_style(number_style)
9936                         f.add_literal_data(failed_str)
9937                         f.pop_style()
9938                         f.add_literal_data(" failed")
9939
9940                 padding = self._jobs_column_width - len(plain_output.getvalue())
9941                 if padding > 0:
9942                         f.add_literal_data(padding * " ")
9943
9944                 f.add_literal_data("Load avg: ")
9945                 f.add_literal_data(load_avg_str)
9946
9947                 # Truncate to fit width, to avoid making the terminal scroll if the
9948                 # line overflows (happens when the load average is large).
9949                 plain_output = plain_output.getvalue()
9950                 if self._isatty and len(plain_output) > self.width:
9951                         # Use plain_output here since it's easier to truncate
9952                         # properly than the color output which contains console
9953                         # color codes.
9954                         self._update(plain_output[:self.width])
9955                 else:
9956                         self._update(color_output.getvalue())
9957
9958                 xtermTitle(" ".join(plain_output.split()))
9959
9960 class Scheduler(PollScheduler):
9961
9962         _opts_ignore_blockers = \
9963                 frozenset(["--buildpkgonly",
9964                 "--fetchonly", "--fetch-all-uri",
9965                 "--nodeps", "--pretend"])
9966
9967         _opts_no_background = \
9968                 frozenset(["--pretend",
9969                 "--fetchonly", "--fetch-all-uri"])
9970
9971         _opts_no_restart = frozenset(["--buildpkgonly",
9972                 "--fetchonly", "--fetch-all-uri", "--pretend"])
9973
9974         _bad_resume_opts = set(["--ask", "--changelog",
9975                 "--resume", "--skipfirst"])
9976
9977         _fetch_log = "/var/log/emerge-fetch.log"
9978
9979         class _iface_class(SlotObject):
9980                 __slots__ = ("dblinkEbuildPhase", "dblinkDisplayMerge",
9981                         "dblinkElog", "fetch", "register", "schedule",
9982                         "scheduleSetup", "scheduleUnpack", "scheduleYield",
9983                         "unregister")
9984
9985         class _fetch_iface_class(SlotObject):
9986                 __slots__ = ("log_file", "schedule")
9987
9988         _task_queues_class = slot_dict_class(
9989                 ("merge", "jobs", "fetch", "unpack"), prefix="")
9990
9991         class _build_opts_class(SlotObject):
9992                 __slots__ = ("buildpkg", "buildpkgonly",
9993                         "fetch_all_uri", "fetchonly", "pretend")
9994
9995         class _binpkg_opts_class(SlotObject):
9996                 __slots__ = ("fetchonly", "getbinpkg", "pretend")
9997
9998         class _pkg_count_class(SlotObject):
9999                 __slots__ = ("curval", "maxval")
10000
10001         class _emerge_log_class(SlotObject):
10002                 __slots__ = ("xterm_titles",)
10003
10004                 def log(self, *pargs, **kwargs):
10005                         if not self.xterm_titles:
10006                                 # Avoid interference with the scheduler's status display.
10007                                 kwargs.pop("short_msg", None)
10008                         emergelog(self.xterm_titles, *pargs, **kwargs)
10009
10010         class _failed_pkg(SlotObject):
10011                 __slots__ = ("build_dir", "build_log", "pkg", "returncode")
10012
10013         class _ConfigPool(object):
10014                 """Interface for a task to temporarily allocate a config
10015                 instance from a pool. This allows a task to be constructed
10016                 long before the config instance actually becomes needed, like
10017                 when prefetchers are constructed for the whole merge list."""
10018                 __slots__ = ("_root", "_allocate", "_deallocate")
10019                 def __init__(self, root, allocate, deallocate):
10020                         self._root = root
10021                         self._allocate = allocate
10022                         self._deallocate = deallocate
10023                 def allocate(self):
10024                         return self._allocate(self._root)
10025                 def deallocate(self, settings):
10026                         self._deallocate(settings)
10027
10028         class _unknown_internal_error(portage.exception.PortageException):
10029                 """
10030                 Used internally to terminate scheduling. The specific reason for
10031                 the failure should have been dumped to stderr.
10032                 """
10033                 def __init__(self, value=""):
10034                         portage.exception.PortageException.__init__(self, value)
10035
10036         def __init__(self, settings, trees, mtimedb, myopts,
10037                 spinner, mergelist, favorites, digraph):
10038                 PollScheduler.__init__(self)
10039                 self.settings = settings
10040                 self.target_root = settings["ROOT"]
10041                 self.trees = trees
10042                 self.myopts = myopts
10043                 self._spinner = spinner
10044                 self._mtimedb = mtimedb
10045                 self._mergelist = mergelist
10046                 self._favorites = favorites
10047                 self._args_set = InternalPackageSet(favorites)
10048                 self._build_opts = self._build_opts_class()
10049                 for k in self._build_opts.__slots__:
10050                         setattr(self._build_opts, k, "--" + k.replace("_", "-") in myopts)
10051                 self._binpkg_opts = self._binpkg_opts_class()
10052                 for k in self._binpkg_opts.__slots__:
10053                         setattr(self._binpkg_opts, k, "--" + k.replace("_", "-") in myopts)
10054
10055                 self.curval = 0
10056                 self._logger = self._emerge_log_class()
10057                 self._task_queues = self._task_queues_class()
10058                 for k in self._task_queues.allowed_keys:
10059                         setattr(self._task_queues, k,
10060                                 SequentialTaskQueue())
10061
10062                 # Holds merges that will wait to be executed when no builds are
10063                 # executing. This is useful for system packages since dependencies
10064                 # on system packages are frequently unspecified.
10065                 self._merge_wait_queue = []
10066                 # Holds merges that have been transfered from the merge_wait_queue to
10067                 # the actual merge queue. They are removed from this list upon
10068                 # completion. Other packages can start building only when this list is
10069                 # empty.
10070                 self._merge_wait_scheduled = []
10071
10072                 # Holds system packages and their deep runtime dependencies. Before
10073                 # being merged, these packages go to merge_wait_queue, to be merged
10074                 # when no other packages are building.
10075                 self._deep_system_deps = set()
10076
10077                 # Holds packages to merge which will satisfy currently unsatisfied
10078                 # deep runtime dependencies of system packages. If this is not empty
10079                 # then no parallel builds will be spawned until it is empty. This
10080                 # minimizes the possibility that a build will fail due to the system
10081                 # being in a fragile state. For example, see bug #259954.
10082                 self._unsatisfied_system_deps = set()
10083
10084                 self._status_display = JobStatusDisplay()
10085                 self._max_load = myopts.get("--load-average")
10086                 max_jobs = myopts.get("--jobs")
10087                 if max_jobs is None:
10088                         max_jobs = 1
10089                 self._set_max_jobs(max_jobs)
10090
10091                 # The root where the currently running
10092                 # portage instance is installed.
10093                 self._running_root = trees["/"]["root_config"]
10094                 self.edebug = 0
10095                 if settings.get("PORTAGE_DEBUG", "") == "1":
10096                         self.edebug = 1
10097                 self.pkgsettings = {}
10098                 self._config_pool = {}
10099                 self._blocker_db = {}
10100                 for root in trees:
10101                         self._config_pool[root] = []
10102                         self._blocker_db[root] = BlockerDB(trees[root]["root_config"])
10103
10104                 fetch_iface = self._fetch_iface_class(log_file=self._fetch_log,
10105                         schedule=self._schedule_fetch)
10106                 self._sched_iface = self._iface_class(
10107                         dblinkEbuildPhase=self._dblink_ebuild_phase,
10108                         dblinkDisplayMerge=self._dblink_display_merge,
10109                         dblinkElog=self._dblink_elog,
10110                         fetch=fetch_iface, register=self._register,
10111                         schedule=self._schedule_wait,
10112                         scheduleSetup=self._schedule_setup,
10113                         scheduleUnpack=self._schedule_unpack,
10114                         scheduleYield=self._schedule_yield,
10115                         unregister=self._unregister)
10116
10117                 self._prefetchers = weakref.WeakValueDictionary()
10118                 self._pkg_queue = []
10119                 self._completed_tasks = set()
10120
10121                 self._failed_pkgs = []
10122                 self._failed_pkgs_all = []
10123                 self._failed_pkgs_die_msgs = []
10124                 self._post_mod_echo_msgs = []
10125                 self._parallel_fetch = False
10126                 merge_count = len([x for x in mergelist \
10127                         if isinstance(x, Package) and x.operation == "merge"])
10128                 self._pkg_count = self._pkg_count_class(
10129                         curval=0, maxval=merge_count)
10130                 self._status_display.maxval = self._pkg_count.maxval
10131
10132                 # The load average takes some time to respond when new
10133                 # jobs are added, so we need to limit the rate of adding
10134                 # new jobs.
10135                 self._job_delay_max = 10
10136                 self._job_delay_factor = 1.0
10137                 self._job_delay_exp = 1.5
10138                 self._previous_job_start_time = None
10139
10140                 self._set_digraph(digraph)
10141
10142                 # This is used to memoize the _choose_pkg() result when
10143                 # no packages can be chosen until one of the existing
10144                 # jobs completes.
10145                 self._choose_pkg_return_early = False
10146
10147                 features = self.settings.features
10148                 if "parallel-fetch" in features and \
10149                         not ("--pretend" in self.myopts or \
10150                         "--fetch-all-uri" in self.myopts or \
10151                         "--fetchonly" in self.myopts):
10152                         if "distlocks" not in features:
10153                                 portage.writemsg(red("!!!")+"\n", noiselevel=-1)
10154                                 portage.writemsg(red("!!!")+" parallel-fetching " + \
10155                                         "requires the distlocks feature enabled"+"\n",
10156                                         noiselevel=-1)
10157                                 portage.writemsg(red("!!!")+" you have it disabled, " + \
10158                                         "thus parallel-fetching is being disabled"+"\n",
10159                                         noiselevel=-1)
10160                                 portage.writemsg(red("!!!")+"\n", noiselevel=-1)
10161                         elif len(mergelist) > 1:
10162                                 self._parallel_fetch = True
10163
10164                 if self._parallel_fetch:
10165                                 # clear out existing fetch log if it exists
10166                                 try:
10167                                         open(self._fetch_log, 'w')
10168                                 except EnvironmentError:
10169                                         pass
10170
10171                 self._running_portage = None
10172                 portage_match = self._running_root.trees["vartree"].dbapi.match(
10173                         portage.const.PORTAGE_PACKAGE_ATOM)
10174                 if portage_match:
10175                         cpv = portage_match.pop()
10176                         self._running_portage = self._pkg(cpv, "installed",
10177                                 self._running_root, installed=True)
10178
10179         def _poll(self, timeout=None):
10180                 self._schedule()
10181                 PollScheduler._poll(self, timeout=timeout)
10182
10183         def _set_max_jobs(self, max_jobs):
10184                 self._max_jobs = max_jobs
10185                 self._task_queues.jobs.max_jobs = max_jobs
10186
10187         def _background_mode(self):
10188                 """
10189                 Check if background mode is enabled and adjust states as necessary.
10190
10191                 @rtype: bool
10192                 @returns: True if background mode is enabled, False otherwise.
10193                 """
10194                 background = (self._max_jobs is True or \
10195                         self._max_jobs > 1 or "--quiet" in self.myopts) and \
10196                         not bool(self._opts_no_background.intersection(self.myopts))
10197
10198                 if background:
10199                         interactive_tasks = self._get_interactive_tasks()
10200                         if interactive_tasks:
10201                                 background = False
10202                                 writemsg_level(">>> Sending package output to stdio due " + \
10203                                         "to interactive package(s):\n",
10204                                         level=logging.INFO, noiselevel=-1)
10205                                 msg = [""]
10206                                 for pkg in interactive_tasks:
10207                                         pkg_str = "  " + colorize("INFORM", str(pkg.cpv))
10208                                         if pkg.root != "/":
10209                                                 pkg_str += " for " + pkg.root
10210                                         msg.append(pkg_str)
10211                                 msg.append("")
10212                                 writemsg_level("".join("%s\n" % (l,) for l in msg),
10213                                         level=logging.INFO, noiselevel=-1)
10214                                 if self._max_jobs is True or self._max_jobs > 1:
10215                                         self._set_max_jobs(1)
10216                                         writemsg_level(">>> Setting --jobs=1 due " + \
10217                                                 "to the above interactive package(s)\n",
10218                                                 level=logging.INFO, noiselevel=-1)
10219
10220                 self._status_display.quiet = \
10221                         not background or \
10222                         ("--quiet" in self.myopts and \
10223                         "--verbose" not in self.myopts)
10224
10225                 self._logger.xterm_titles = \
10226                         "notitles" not in self.settings.features and \
10227                         self._status_display.quiet
10228
10229                 return background
10230
10231         def _get_interactive_tasks(self):
10232                 from portage import flatten
10233                 from portage.dep import use_reduce, paren_reduce
10234                 interactive_tasks = []
10235                 for task in self._mergelist:
10236                         if not (isinstance(task, Package) and \
10237                                 task.operation == "merge"):
10238                                 continue
10239                         try:
10240                                 properties = flatten(use_reduce(paren_reduce(
10241                                         task.metadata["PROPERTIES"]), uselist=task.use.enabled))
10242                         except portage.exception.InvalidDependString, e:
10243                                 show_invalid_depstring_notice(task,
10244                                         task.metadata["PROPERTIES"], str(e))
10245                                 raise self._unknown_internal_error()
10246                         if "interactive" in properties:
10247                                 interactive_tasks.append(task)
10248                 return interactive_tasks
10249
10250         def _set_digraph(self, digraph):
10251                 if "--nodeps" in self.myopts or \
10252                         (self._max_jobs is not True and self._max_jobs < 2):
10253                         # save some memory
10254                         self._digraph = None
10255                         return
10256
10257                 self._digraph = digraph
10258                 self._find_system_deps()
10259                 self._prune_digraph()
10260                 self._prevent_builddir_collisions()
10261
10262         def _find_system_deps(self):
10263                 """
10264                 Find system packages and their deep runtime dependencies. Before being
10265                 merged, these packages go to merge_wait_queue, to be merged when no
10266                 other packages are building.
10267                 """
10268                 deep_system_deps = self._deep_system_deps
10269                 deep_system_deps.clear()
10270                 deep_system_deps.update(
10271                         _find_deep_system_runtime_deps(self._digraph))
10272                 deep_system_deps.difference_update([pkg for pkg in \
10273                         deep_system_deps if pkg.operation != "merge"])
10274
10275         def _prune_digraph(self):
10276                 """
10277                 Prune any root nodes that are irrelevant.
10278                 """
10279
10280                 graph = self._digraph
10281                 completed_tasks = self._completed_tasks
10282                 removed_nodes = set()
10283                 while True:
10284                         for node in graph.root_nodes():
10285                                 if not isinstance(node, Package) or \
10286                                         (node.installed and node.operation == "nomerge") or \
10287                                         node.onlydeps or \
10288                                         node in completed_tasks:
10289                                         removed_nodes.add(node)
10290                         if removed_nodes:
10291                                 graph.difference_update(removed_nodes)
10292                         if not removed_nodes:
10293                                 break
10294                         removed_nodes.clear()
10295
10296         def _prevent_builddir_collisions(self):
10297                 """
10298                 When building stages, sometimes the same exact cpv needs to be merged
10299                 to both $ROOTs. Add edges to the digraph in order to avoid collisions
10300                 in the builddir. Currently, normal file locks would be inappropriate
10301                 for this purpose since emerge holds all of it's build dir locks from
10302                 the main process.
10303                 """
10304                 cpv_map = {}
10305                 for pkg in self._mergelist:
10306                         if not isinstance(pkg, Package):
10307                                 # a satisfied blocker
10308                                 continue
10309                         if pkg.installed:
10310                                 continue
10311                         if pkg.cpv not in cpv_map:
10312                                 cpv_map[pkg.cpv] = [pkg]
10313                                 continue
10314                         for earlier_pkg in cpv_map[pkg.cpv]:
10315                                 self._digraph.add(earlier_pkg, pkg,
10316                                         priority=DepPriority(buildtime=True))
10317                         cpv_map[pkg.cpv].append(pkg)
10318
10319         class _pkg_failure(portage.exception.PortageException):
10320                 """
10321                 An instance of this class is raised by unmerge() when
10322                 an uninstallation fails.
10323                 """
10324                 status = 1
10325                 def __init__(self, *pargs):
10326                         portage.exception.PortageException.__init__(self, pargs)
10327                         if pargs:
10328                                 self.status = pargs[0]
10329
10330         def _schedule_fetch(self, fetcher):
10331                 """
10332                 Schedule a fetcher on the fetch queue, in order to
10333                 serialize access to the fetch log.
10334                 """
10335                 self._task_queues.fetch.addFront(fetcher)
10336
10337         def _schedule_setup(self, setup_phase):
10338                 """
10339                 Schedule a setup phase on the merge queue, in order to
10340                 serialize unsandboxed access to the live filesystem.
10341                 """
10342                 self._task_queues.merge.addFront(setup_phase)
10343                 self._schedule()
10344
10345         def _schedule_unpack(self, unpack_phase):
10346                 """
10347                 Schedule an unpack phase on the unpack queue, in order
10348                 to serialize $DISTDIR access for live ebuilds.
10349                 """
10350                 self._task_queues.unpack.add(unpack_phase)
10351
10352         def _find_blockers(self, new_pkg):
10353                 """
10354                 Returns a callable which should be called only when
10355                 the vdb lock has been acquired.
10356                 """
10357                 def get_blockers():
10358                         return self._find_blockers_with_lock(new_pkg, acquire_lock=0)
10359                 return get_blockers
10360
10361         def _find_blockers_with_lock(self, new_pkg, acquire_lock=0):
10362                 if self._opts_ignore_blockers.intersection(self.myopts):
10363                         return None
10364
10365                 # Call gc.collect() here to avoid heap overflow that
10366                 # triggers 'Cannot allocate memory' errors (reported
10367                 # with python-2.5).
10368                 import gc
10369                 gc.collect()
10370
10371                 blocker_db = self._blocker_db[new_pkg.root]
10372
10373                 blocker_dblinks = []
10374                 for blocking_pkg in blocker_db.findInstalledBlockers(
10375                         new_pkg, acquire_lock=acquire_lock):
10376                         if new_pkg.slot_atom == blocking_pkg.slot_atom:
10377                                 continue
10378                         if new_pkg.cpv == blocking_pkg.cpv:
10379                                 continue
10380                         blocker_dblinks.append(portage.dblink(
10381                                 blocking_pkg.category, blocking_pkg.pf, blocking_pkg.root,
10382                                 self.pkgsettings[blocking_pkg.root], treetype="vartree",
10383                                 vartree=self.trees[blocking_pkg.root]["vartree"]))
10384
10385                 gc.collect()
10386
10387                 return blocker_dblinks
10388
10389         def _dblink_pkg(self, pkg_dblink):
10390                 cpv = pkg_dblink.mycpv
10391                 type_name = RootConfig.tree_pkg_map[pkg_dblink.treetype]
10392                 root_config = self.trees[pkg_dblink.myroot]["root_config"]
10393                 installed = type_name == "installed"
10394                 return self._pkg(cpv, type_name, root_config, installed=installed)
10395
10396         def _append_to_log_path(self, log_path, msg):
10397                 f = open(log_path, 'a')
10398                 try:
10399                         f.write(msg)
10400                 finally:
10401                         f.close()
10402
10403         def _dblink_elog(self, pkg_dblink, phase, func, msgs):
10404
10405                 log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
10406                 log_file = None
10407                 out = sys.stdout
10408                 background = self._background
10409
10410                 if background and log_path is not None:
10411                         log_file = open(log_path, 'a')
10412                         out = log_file
10413
10414                 try:
10415                         for msg in msgs:
10416                                 func(msg, phase=phase, key=pkg_dblink.mycpv, out=out)
10417                 finally:
10418                         if log_file is not None:
10419                                 log_file.close()
10420
10421         def _dblink_display_merge(self, pkg_dblink, msg, level=0, noiselevel=0):
10422                 log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
10423                 background = self._background
10424
10425                 if log_path is None:
10426                         if not (background and level < logging.WARN):
10427                                 portage.util.writemsg_level(msg,
10428                                         level=level, noiselevel=noiselevel)
10429                 else:
10430                         if not background:
10431                                 portage.util.writemsg_level(msg,
10432                                         level=level, noiselevel=noiselevel)
10433                         self._append_to_log_path(log_path, msg)
10434
10435         def _dblink_ebuild_phase(self,
10436                 pkg_dblink, pkg_dbapi, ebuild_path, phase):
10437                 """
10438                 Using this callback for merge phases allows the scheduler
10439                 to run while these phases execute asynchronously, and allows
10440                 the scheduler control output handling.
10441                 """
10442
10443                 scheduler = self._sched_iface
10444                 settings = pkg_dblink.settings
10445                 pkg = self._dblink_pkg(pkg_dblink)
10446                 background = self._background
10447                 log_path = settings.get("PORTAGE_LOG_FILE")
10448
10449                 ebuild_phase = EbuildPhase(background=background,
10450                         pkg=pkg, phase=phase, scheduler=scheduler,
10451                         settings=settings, tree=pkg_dblink.treetype)
10452                 ebuild_phase.start()
10453                 ebuild_phase.wait()
10454
10455                 return ebuild_phase.returncode
10456
10457         def _generate_digests(self):
10458                 """
10459                 Generate digests if necessary for --digests or FEATURES=digest.
10460                 In order to avoid interference, this must done before parallel
10461                 tasks are started.
10462                 """
10463
10464                 if '--fetchonly' in self.myopts:
10465                         return os.EX_OK
10466
10467                 digest = '--digest' in self.myopts
10468                 if not digest:
10469                         for pkgsettings in self.pkgsettings.itervalues():
10470                                 if 'digest' in pkgsettings.features:
10471                                         digest = True
10472                                         break
10473
10474                 if not digest:
10475                         return os.EX_OK
10476
10477                 for x in self._mergelist:
10478                         if not isinstance(x, Package) or \
10479                                 x.type_name != 'ebuild' or \
10480                                 x.operation != 'merge':
10481                                 continue
10482                         pkgsettings = self.pkgsettings[x.root]
10483                         if '--digest' not in self.myopts and \
10484                                 'digest' not in pkgsettings.features:
10485                                 continue
10486                         portdb = x.root_config.trees['porttree'].dbapi
10487                         ebuild_path = portdb.findname(x.cpv)
10488                         if not ebuild_path:
10489                                 writemsg_level(
10490                                         "!!! Could not locate ebuild for '%s'.\n" \
10491                                         % x.cpv, level=logging.ERROR, noiselevel=-1)
10492                                 return 1
10493                         pkgsettings['O'] = os.path.dirname(ebuild_path)
10494                         if not portage.digestgen([], pkgsettings, myportdb=portdb):
10495                                 writemsg_level(
10496                                         "!!! Unable to generate manifest for '%s'.\n" \
10497                                         % x.cpv, level=logging.ERROR, noiselevel=-1)
10498                                 return 1
10499
10500                 return os.EX_OK
10501
10502         def _check_manifests(self):
10503                 # Verify all the manifests now so that the user is notified of failure
10504                 # as soon as possible.
10505                 if "strict" not in self.settings.features or \
10506                         "--fetchonly" in self.myopts or \
10507                         "--fetch-all-uri" in self.myopts:
10508                         return os.EX_OK
10509
10510                 shown_verifying_msg = False
10511                 quiet_settings = {}
10512                 for myroot, pkgsettings in self.pkgsettings.iteritems():
10513                         quiet_config = portage.config(clone=pkgsettings)
10514                         quiet_config["PORTAGE_QUIET"] = "1"
10515                         quiet_config.backup_changes("PORTAGE_QUIET")
10516                         quiet_settings[myroot] = quiet_config
10517                         del quiet_config
10518
10519                 for x in self._mergelist:
10520                         if not isinstance(x, Package) or \
10521                                 x.type_name != "ebuild":
10522                                 continue
10523
10524                         if not shown_verifying_msg:
10525                                 shown_verifying_msg = True
10526                                 self._status_msg("Verifying ebuild manifests")
10527
10528                         root_config = x.root_config
10529                         portdb = root_config.trees["porttree"].dbapi
10530                         quiet_config = quiet_settings[root_config.root]
10531                         quiet_config["O"] = os.path.dirname(portdb.findname(x.cpv))
10532                         if not portage.digestcheck([], quiet_config, strict=True):
10533                                 return 1
10534
10535                 return os.EX_OK
10536
10537         def _add_prefetchers(self):
10538
10539                 if not self._parallel_fetch:
10540                         return
10541
10542                 if self._parallel_fetch:
10543                         self._status_msg("Starting parallel fetch")
10544
10545                         prefetchers = self._prefetchers
10546                         getbinpkg = "--getbinpkg" in self.myopts
10547
10548                         # In order to avoid "waiting for lock" messages
10549                         # at the beginning, which annoy users, never
10550                         # spawn a prefetcher for the first package.
10551                         for pkg in self._mergelist[1:]:
10552                                 prefetcher = self._create_prefetcher(pkg)
10553                                 if prefetcher is not None:
10554                                         self._task_queues.fetch.add(prefetcher)
10555                                         prefetchers[pkg] = prefetcher
10556
10557         def _create_prefetcher(self, pkg):
10558                 """
10559                 @return: a prefetcher, or None if not applicable
10560                 """
10561                 prefetcher = None
10562
10563                 if not isinstance(pkg, Package):
10564                         pass
10565
10566                 elif pkg.type_name == "ebuild":
10567
10568                         prefetcher = EbuildFetcher(background=True,
10569                                 config_pool=self._ConfigPool(pkg.root,
10570                                 self._allocate_config, self._deallocate_config),
10571                                 fetchonly=1, logfile=self._fetch_log,
10572                                 pkg=pkg, prefetch=True, scheduler=self._sched_iface)
10573
10574                 elif pkg.type_name == "binary" and \
10575                         "--getbinpkg" in self.myopts and \
10576                         pkg.root_config.trees["bintree"].isremote(pkg.cpv):
10577
10578                         prefetcher = BinpkgPrefetcher(background=True,
10579                                 pkg=pkg, scheduler=self._sched_iface)
10580
10581                 return prefetcher
10582
10583         def _is_restart_scheduled(self):
10584                 """
10585                 Check if the merge list contains a replacement
10586                 for the current running instance, that will result
10587                 in restart after merge.
10588                 @rtype: bool
10589                 @returns: True if a restart is scheduled, False otherwise.
10590                 """
10591                 if self._opts_no_restart.intersection(self.myopts):
10592                         return False
10593
10594                 mergelist = self._mergelist
10595
10596                 for i, pkg in enumerate(mergelist):
10597                         if self._is_restart_necessary(pkg) and \
10598                                 i != len(mergelist) - 1:
10599                                 return True
10600
10601                 return False
10602
10603         def _is_restart_necessary(self, pkg):
10604                 """
10605                 @return: True if merging the given package
10606                         requires restart, False otherwise.
10607                 """
10608
10609                 # Figure out if we need a restart.
10610                 if pkg.root == self._running_root.root and \
10611                         portage.match_from_list(
10612                         portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
10613                         if self._running_portage:
10614                                 return pkg.cpv != self._running_portage.cpv
10615                         return True
10616                 return False
10617
10618         def _restart_if_necessary(self, pkg):
10619                 """
10620                 Use execv() to restart emerge. This happens
10621                 if portage upgrades itself and there are
10622                 remaining packages in the list.
10623                 """
10624
10625                 if self._opts_no_restart.intersection(self.myopts):
10626                         return
10627
10628                 if not self._is_restart_necessary(pkg):
10629                         return
10630
10631                 if pkg == self._mergelist[-1]:
10632                         return
10633
10634                 self._main_loop_cleanup()
10635
10636                 logger = self._logger
10637                 pkg_count = self._pkg_count
10638                 mtimedb = self._mtimedb
10639                 bad_resume_opts = self._bad_resume_opts
10640
10641                 logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
10642                         (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
10643
10644                 logger.log(" *** RESTARTING " + \
10645                         "emerge via exec() after change of " + \
10646                         "portage version.")
10647
10648                 mtimedb["resume"]["mergelist"].remove(list(pkg))
10649                 mtimedb.commit()
10650                 portage.run_exitfuncs()
10651                 mynewargv = [sys.argv[0], "--resume"]
10652                 resume_opts = self.myopts.copy()
10653                 # For automatic resume, we need to prevent
10654                 # any of bad_resume_opts from leaking in
10655                 # via EMERGE_DEFAULT_OPTS.
10656                 resume_opts["--ignore-default-opts"] = True
10657                 for myopt, myarg in resume_opts.iteritems():
10658                         if myopt not in bad_resume_opts:
10659                                 if myarg is True:
10660                                         mynewargv.append(myopt)
10661                                 else:
10662                                         mynewargv.append(myopt +"="+ str(myarg))
10663                 # priority only needs to be adjusted on the first run
10664                 os.environ["PORTAGE_NICENESS"] = "0"
10665                 os.execv(mynewargv[0], mynewargv)
10666
10667         def merge(self):
10668
10669                 if "--resume" in self.myopts:
10670                         # We're resuming.
10671                         portage.writemsg_stdout(
10672                                 colorize("GOOD", "*** Resuming merge...\n"), noiselevel=-1)
10673                         self._logger.log(" *** Resuming merge...")
10674
10675                 self._save_resume_list()
10676
10677                 try:
10678                         self._background = self._background_mode()
10679                 except self._unknown_internal_error:
10680                         return 1
10681
10682                 for root in self.trees:
10683                         root_config = self.trees[root]["root_config"]
10684
10685                         # Even for --pretend --fetch mode, PORTAGE_TMPDIR is required
10686                         # since it might spawn pkg_nofetch which requires PORTAGE_BUILDDIR
10687                         # for ensuring sane $PWD (bug #239560) and storing elog messages.
10688                         tmpdir = root_config.settings.get("PORTAGE_TMPDIR", "")
10689                         if not tmpdir or not os.path.isdir(tmpdir):
10690                                 msg = "The directory specified in your " + \
10691                                         "PORTAGE_TMPDIR variable, '%s', " % tmpdir + \
10692                                 "does not exist. Please create this " + \
10693                                 "directory or correct your PORTAGE_TMPDIR setting."
10694                                 msg = textwrap.wrap(msg, 70)
10695                                 out = portage.output.EOutput()
10696                                 for l in msg:
10697                                         out.eerror(l)
10698                                 return 1
10699
10700                         if self._background:
10701                                 root_config.settings.unlock()
10702                                 root_config.settings["PORTAGE_BACKGROUND"] = "1"
10703                                 root_config.settings.backup_changes("PORTAGE_BACKGROUND")
10704                                 root_config.settings.lock()
10705
10706                         self.pkgsettings[root] = portage.config(
10707                                 clone=root_config.settings)
10708
10709                 rval = self._generate_digests()
10710                 if rval != os.EX_OK:
10711                         return rval
10712
10713                 rval = self._check_manifests()
10714                 if rval != os.EX_OK:
10715                         return rval
10716
10717                 keep_going = "--keep-going" in self.myopts
10718                 fetchonly = self._build_opts.fetchonly
10719                 mtimedb = self._mtimedb
10720                 failed_pkgs = self._failed_pkgs
10721
10722                 while True:
10723                         rval = self._merge()
10724                         if rval == os.EX_OK or fetchonly or not keep_going:
10725                                 break
10726                         if "resume" not in mtimedb:
10727                                 break
10728                         mergelist = self._mtimedb["resume"].get("mergelist")
10729                         if not mergelist:
10730                                 break
10731
10732                         if not failed_pkgs:
10733                                 break
10734
10735                         for failed_pkg in failed_pkgs:
10736                                 mergelist.remove(list(failed_pkg.pkg))
10737
10738                         self._failed_pkgs_all.extend(failed_pkgs)
10739                         del failed_pkgs[:]
10740
10741                         if not mergelist:
10742                                 break
10743
10744                         if not self._calc_resume_list():
10745                                 break
10746
10747                         clear_caches(self.trees)
10748                         if not self._mergelist:
10749                                 break
10750
10751                         self._save_resume_list()
10752                         self._pkg_count.curval = 0
10753                         self._pkg_count.maxval = len([x for x in self._mergelist \
10754                                 if isinstance(x, Package) and x.operation == "merge"])
10755                         self._status_display.maxval = self._pkg_count.maxval
10756
10757                 self._logger.log(" *** Finished. Cleaning up...")
10758
10759                 if failed_pkgs:
10760                         self._failed_pkgs_all.extend(failed_pkgs)
10761                         del failed_pkgs[:]
10762
10763                 background = self._background
10764                 failure_log_shown = False
10765                 if background and len(self._failed_pkgs_all) == 1:
10766                         # If only one package failed then just show it's
10767                         # whole log for easy viewing.
10768                         failed_pkg = self._failed_pkgs_all[-1]
10769                         build_dir = failed_pkg.build_dir
10770                         log_file = None
10771
10772                         log_paths = [failed_pkg.build_log]
10773
10774                         log_path = self._locate_failure_log(failed_pkg)
10775                         if log_path is not None:
10776                                 try:
10777                                         log_file = open(log_path)
10778                                 except IOError:
10779                                         pass
10780
10781                         if log_file is not None:
10782                                 try:
10783                                         for line in log_file:
10784                                                 writemsg_level(line, noiselevel=-1)
10785                                 finally:
10786                                         log_file.close()
10787                                 failure_log_shown = True
10788
10789                 # Dump mod_echo output now since it tends to flood the terminal.
10790                 # This allows us to avoid having more important output, generated
10791                 # later, from being swept away by the mod_echo output.
10792                 mod_echo_output =  _flush_elog_mod_echo()
10793
10794                 if background and not failure_log_shown and \
10795                         self._failed_pkgs_all and \
10796                         self._failed_pkgs_die_msgs and \
10797                         not mod_echo_output:
10798
10799                         printer = portage.output.EOutput()
10800                         for mysettings, key, logentries in self._failed_pkgs_die_msgs:
10801                                 root_msg = ""
10802                                 if mysettings["ROOT"] != "/":
10803                                         root_msg = " merged to %s" % mysettings["ROOT"]
10804                                 print
10805                                 printer.einfo("Error messages for package %s%s:" % \
10806                                         (colorize("INFORM", key), root_msg))
10807                                 print
10808                                 for phase in portage.const.EBUILD_PHASES:
10809                                         if phase not in logentries:
10810                                                 continue
10811                                         for msgtype, msgcontent in logentries[phase]:
10812                                                 if isinstance(msgcontent, basestring):
10813                                                         msgcontent = [msgcontent]
10814                                                 for line in msgcontent:
10815                                                         printer.eerror(line.strip("\n"))
10816
10817                 if self._post_mod_echo_msgs:
10818                         for msg in self._post_mod_echo_msgs:
10819                                 msg()
10820
10821                 if len(self._failed_pkgs_all) > 1 or \
10822                         (self._failed_pkgs_all and "--keep-going" in self.myopts):
10823                         if len(self._failed_pkgs_all) > 1:
10824                                 msg = "The following %d packages have " % \
10825                                         len(self._failed_pkgs_all) + \
10826                                         "failed to build or install:"
10827                         else:
10828                                 msg = "The following package has " + \
10829                                         "failed to build or install:"
10830                         prefix = bad(" * ")
10831                         writemsg(prefix + "\n", noiselevel=-1)
10832                         from textwrap import wrap
10833                         for line in wrap(msg, 72):
10834                                 writemsg("%s%s\n" % (prefix, line), noiselevel=-1)
10835                         writemsg(prefix + "\n", noiselevel=-1)
10836                         for failed_pkg in self._failed_pkgs_all:
10837                                 writemsg("%s\t%s\n" % (prefix,
10838                                         colorize("INFORM", str(failed_pkg.pkg))),
10839                                         noiselevel=-1)
10840                         writemsg(prefix + "\n", noiselevel=-1)
10841
10842                 return rval
10843
10844         def _elog_listener(self, mysettings, key, logentries, fulltext):
10845                 errors = portage.elog.filter_loglevels(logentries, ["ERROR"])
10846                 if errors:
10847                         self._failed_pkgs_die_msgs.append(
10848                                 (mysettings, key, errors))
10849
10850         def _locate_failure_log(self, failed_pkg):
10851
10852                 build_dir = failed_pkg.build_dir
10853                 log_file = None
10854
10855                 log_paths = [failed_pkg.build_log]
10856
10857                 for log_path in log_paths:
10858                         if not log_path:
10859                                 continue
10860
10861                         try:
10862                                 log_size = os.stat(log_path).st_size
10863                         except OSError:
10864                                 continue
10865
10866                         if log_size == 0:
10867                                 continue
10868
10869                         return log_path
10870
10871                 return None
10872
10873         def _add_packages(self):
10874                 pkg_queue = self._pkg_queue
10875                 for pkg in self._mergelist:
10876                         if isinstance(pkg, Package):
10877                                 pkg_queue.append(pkg)
10878                         elif isinstance(pkg, Blocker):
10879                                 pass
10880
10881         def _system_merge_started(self, merge):
10882                 """
10883                 Add any unsatisfied runtime deps to self._unsatisfied_system_deps.
10884                 """
10885                 graph = self._digraph
10886                 if graph is None:
10887                         return
10888                 pkg = merge.merge.pkg
10889
10890                 # Skip this if $ROOT != / since it shouldn't matter if there
10891                 # are unsatisfied system runtime deps in this case.
10892                 if pkg.root != '/':
10893                         return
10894
10895                 completed_tasks = self._completed_tasks
10896                 unsatisfied = self._unsatisfied_system_deps
10897
10898                 def ignore_non_runtime_or_satisfied(priority):
10899                         """
10900                         Ignore non-runtime and satisfied runtime priorities.
10901                         """
10902                         if isinstance(priority, DepPriority) and \
10903                                 not priority.satisfied and \
10904                                 (priority.runtime or priority.runtime_post):
10905                                 return False
10906                         return True
10907
10908                 # When checking for unsatisfied runtime deps, only check
10909                 # direct deps since indirect deps are checked when the
10910                 # corresponding parent is merged.
10911                 for child in graph.child_nodes(pkg,
10912                         ignore_priority=ignore_non_runtime_or_satisfied):
10913                         if not isinstance(child, Package) or \
10914                                 child.operation == 'uninstall':
10915                                 continue
10916                         if child is pkg:
10917                                 continue
10918                         if child.operation == 'merge' and \
10919                                 child not in completed_tasks:
10920                                 unsatisfied.add(child)
10921
10922         def _merge_wait_exit_handler(self, task):
10923                 self._merge_wait_scheduled.remove(task)
10924                 self._merge_exit(task)
10925
10926         def _merge_exit(self, merge):
10927                 self._do_merge_exit(merge)
10928                 self._deallocate_config(merge.merge.settings)
10929                 if merge.returncode == os.EX_OK and \
10930                         not merge.merge.pkg.installed:
10931                         self._status_display.curval += 1
10932                 self._status_display.merges = len(self._task_queues.merge)
10933                 self._schedule()
10934
10935         def _do_merge_exit(self, merge):
10936                 pkg = merge.merge.pkg
10937                 if merge.returncode != os.EX_OK:
10938                         settings = merge.merge.settings
10939                         build_dir = settings.get("PORTAGE_BUILDDIR")
10940                         build_log = settings.get("PORTAGE_LOG_FILE")
10941
10942                         self._failed_pkgs.append(self._failed_pkg(
10943                                 build_dir=build_dir, build_log=build_log,
10944                                 pkg=pkg,
10945                                 returncode=merge.returncode))
10946                         self._failed_pkg_msg(self._failed_pkgs[-1], "install", "to")
10947
10948                         self._status_display.failed = len(self._failed_pkgs)
10949                         return
10950
10951                 self._task_complete(pkg)
10952                 pkg_to_replace = merge.merge.pkg_to_replace
10953                 if pkg_to_replace is not None:
10954                         # When a package is replaced, mark it's uninstall
10955                         # task complete (if any).
10956                         uninst_hash_key = \
10957                                 ("installed", pkg.root, pkg_to_replace.cpv, "uninstall")
10958                         self._task_complete(uninst_hash_key)
10959
10960                 if pkg.installed:
10961                         return
10962
10963                 self._restart_if_necessary(pkg)
10964
10965                 # Call mtimedb.commit() after each merge so that
10966                 # --resume still works after being interrupted
10967                 # by reboot, sigkill or similar.
10968                 mtimedb = self._mtimedb
10969                 mtimedb["resume"]["mergelist"].remove(list(pkg))
10970                 if not mtimedb["resume"]["mergelist"]:
10971                         del mtimedb["resume"]
10972                 mtimedb.commit()
10973
10974         def _build_exit(self, build):
10975                 if build.returncode == os.EX_OK:
10976                         self.curval += 1
10977                         merge = PackageMerge(merge=build)
10978                         if not build.build_opts.buildpkgonly and \
10979                                 build.pkg in self._deep_system_deps:
10980                                 # Since dependencies on system packages are frequently
10981                                 # unspecified, merge them only when no builds are executing.
10982                                 self._merge_wait_queue.append(merge)
10983                                 merge.addStartListener(self._system_merge_started)
10984                         else:
10985                                 merge.addExitListener(self._merge_exit)
10986                                 self._task_queues.merge.add(merge)
10987                                 self._status_display.merges = len(self._task_queues.merge)
10988                 else:
10989                         settings = build.settings
10990                         build_dir = settings.get("PORTAGE_BUILDDIR")
10991                         build_log = settings.get("PORTAGE_LOG_FILE")
10992
10993                         self._failed_pkgs.append(self._failed_pkg(
10994                                 build_dir=build_dir, build_log=build_log,
10995                                 pkg=build.pkg,
10996                                 returncode=build.returncode))
10997                         self._failed_pkg_msg(self._failed_pkgs[-1], "emerge", "for")
10998
10999                         self._status_display.failed = len(self._failed_pkgs)
11000                         self._deallocate_config(build.settings)
11001                 self._jobs -= 1
11002                 self._status_display.running = self._jobs
11003                 self._schedule()
11004
11005         def _extract_exit(self, build):
11006                 self._build_exit(build)
11007
11008         def _task_complete(self, pkg):
11009                 self._completed_tasks.add(pkg)
11010                 self._unsatisfied_system_deps.discard(pkg)
11011                 self._choose_pkg_return_early = False
11012
11013         def _merge(self):
11014
11015                 self._add_prefetchers()
11016                 self._add_packages()
11017                 pkg_queue = self._pkg_queue
11018                 failed_pkgs = self._failed_pkgs
11019                 portage.locks._quiet = self._background
11020                 portage.elog._emerge_elog_listener = self._elog_listener
11021                 rval = os.EX_OK
11022
11023                 try:
11024                         self._main_loop()
11025                 finally:
11026                         self._main_loop_cleanup()
11027                         portage.locks._quiet = False
11028                         portage.elog._emerge_elog_listener = None
11029                         if failed_pkgs:
11030                                 rval = failed_pkgs[-1].returncode
11031
11032                 return rval
11033
11034         def _main_loop_cleanup(self):
11035                 del self._pkg_queue[:]
11036                 self._completed_tasks.clear()
11037                 self._deep_system_deps.clear()
11038                 self._unsatisfied_system_deps.clear()
11039                 self._choose_pkg_return_early = False
11040                 self._status_display.reset()
11041                 self._digraph = None
11042                 self._task_queues.fetch.clear()
11043
11044         def _choose_pkg(self):
11045                 """
11046                 Choose a task that has all it's dependencies satisfied.
11047                 """
11048
11049                 if self._choose_pkg_return_early:
11050                         return None
11051
11052                 if self._digraph is None:
11053                         if (self._jobs or self._task_queues.merge) and \
11054                                 not ("--nodeps" in self.myopts and \
11055                                 (self._max_jobs is True or self._max_jobs > 1)):
11056                                 self._choose_pkg_return_early = True
11057                                 return None
11058                         return self._pkg_queue.pop(0)
11059
11060                 if not (self._jobs or self._task_queues.merge):
11061                         return self._pkg_queue.pop(0)
11062
11063                 self._prune_digraph()
11064
11065                 chosen_pkg = None
11066                 later = set(self._pkg_queue)
11067                 for pkg in self._pkg_queue:
11068                         later.remove(pkg)
11069                         if not self._dependent_on_scheduled_merges(pkg, later):
11070                                 chosen_pkg = pkg
11071                                 break
11072
11073                 if chosen_pkg is not None:
11074                         self._pkg_queue.remove(chosen_pkg)
11075
11076                 if chosen_pkg is None:
11077                         # There's no point in searching for a package to
11078                         # choose until at least one of the existing jobs
11079                         # completes.
11080                         self._choose_pkg_return_early = True
11081
11082                 return chosen_pkg
11083
11084         def _dependent_on_scheduled_merges(self, pkg, later):
11085                 """
11086                 Traverse the subgraph of the given packages deep dependencies
11087                 to see if it contains any scheduled merges.
11088                 @param pkg: a package to check dependencies for
11089                 @type pkg: Package
11090                 @param later: packages for which dependence should be ignored
11091                         since they will be merged later than pkg anyway and therefore
11092                         delaying the merge of pkg will not result in a more optimal
11093                         merge order
11094                 @type later: set
11095                 @rtype: bool
11096                 @returns: True if the package is dependent, False otherwise.
11097                 """
11098
11099                 graph = self._digraph
11100                 completed_tasks = self._completed_tasks
11101
11102                 dependent = False
11103                 traversed_nodes = set([pkg])
11104                 direct_deps = graph.child_nodes(pkg)
11105                 node_stack = direct_deps
11106                 direct_deps = frozenset(direct_deps)
11107                 while node_stack:
11108                         node = node_stack.pop()
11109                         if node in traversed_nodes:
11110                                 continue
11111                         traversed_nodes.add(node)
11112                         if not ((node.installed and node.operation == "nomerge") or \
11113                                 (node.operation == "uninstall" and \
11114                                 node not in direct_deps) or \
11115                                 node in completed_tasks or \
11116                                 node in later):
11117                                 dependent = True
11118                                 break
11119                         node_stack.extend(graph.child_nodes(node))
11120
11121                 return dependent
11122
11123         def _allocate_config(self, root):
11124                 """
11125                 Allocate a unique config instance for a task in order
11126                 to prevent interference between parallel tasks.
11127                 """
11128                 if self._config_pool[root]:
11129                         temp_settings = self._config_pool[root].pop()
11130                 else:
11131                         temp_settings = portage.config(clone=self.pkgsettings[root])
11132                 # Since config.setcpv() isn't guaranteed to call config.reset() due to
11133                 # performance reasons, call it here to make sure all settings from the
11134                 # previous package get flushed out (such as PORTAGE_LOG_FILE).
11135                 temp_settings.reload()
11136                 temp_settings.reset()
11137                 return temp_settings
11138
11139         def _deallocate_config(self, settings):
11140                 self._config_pool[settings["ROOT"]].append(settings)
11141
11142         def _main_loop(self):
11143
11144                 # Only allow 1 job max if a restart is scheduled
11145                 # due to portage update.
11146                 if self._is_restart_scheduled() or \
11147                         self._opts_no_background.intersection(self.myopts):
11148                         self._set_max_jobs(1)
11149
11150                 merge_queue = self._task_queues.merge
11151
11152                 while self._schedule():
11153                         if self._poll_event_handlers:
11154                                 self._poll_loop()
11155
11156                 while True:
11157                         self._schedule()
11158                         if not (self._jobs or merge_queue):
11159                                 break
11160                         if self._poll_event_handlers:
11161                                 self._poll_loop()
11162
11163         def _keep_scheduling(self):
11164                 return bool(self._pkg_queue and \
11165                         not (self._failed_pkgs and not self._build_opts.fetchonly))
11166
11167         def _schedule_tasks(self):
11168
11169                 # When the number of jobs drops to zero, process all waiting merges.
11170                 if not self._jobs and self._merge_wait_queue:
11171                         for task in self._merge_wait_queue:
11172                                 task.addExitListener(self._merge_wait_exit_handler)
11173                                 self._task_queues.merge.add(task)
11174                         self._status_display.merges = len(self._task_queues.merge)
11175                         self._merge_wait_scheduled.extend(self._merge_wait_queue)
11176                         del self._merge_wait_queue[:]
11177
11178                 self._schedule_tasks_imp()
11179                 self._status_display.display()
11180
11181                 state_change = 0
11182                 for q in self._task_queues.values():
11183                         if q.schedule():
11184                                 state_change += 1
11185
11186                 # Cancel prefetchers if they're the only reason
11187                 # the main poll loop is still running.
11188                 if self._failed_pkgs and not self._build_opts.fetchonly and \
11189                         not (self._jobs or self._task_queues.merge) and \
11190                         self._task_queues.fetch:
11191                         self._task_queues.fetch.clear()
11192                         state_change += 1
11193
11194                 if state_change:
11195                         self._schedule_tasks_imp()
11196                         self._status_display.display()
11197
11198                 return self._keep_scheduling()
11199
11200         def _job_delay(self):
11201                 """
11202                 @rtype: bool
11203                 @returns: True if job scheduling should be delayed, False otherwise.
11204                 """
11205
11206                 if self._jobs and self._max_load is not None:
11207
11208                         current_time = time.time()
11209
11210                         delay = self._job_delay_factor * self._jobs ** self._job_delay_exp
11211                         if delay > self._job_delay_max:
11212                                 delay = self._job_delay_max
11213                         if (current_time - self._previous_job_start_time) < delay:
11214                                 return True
11215
11216                 return False
11217
11218         def _schedule_tasks_imp(self):
11219                 """
11220                 @rtype: bool
11221                 @returns: True if state changed, False otherwise.
11222                 """
11223
11224                 state_change = 0
11225
11226                 while True:
11227
11228                         if not self._keep_scheduling():
11229                                 return bool(state_change)
11230
11231                         if self._choose_pkg_return_early or \
11232                                 self._merge_wait_scheduled or \
11233                                 (self._jobs and self._unsatisfied_system_deps) or \
11234                                 not self._can_add_job() or \
11235                                 self._job_delay():
11236                                 return bool(state_change)
11237
11238                         pkg = self._choose_pkg()
11239                         if pkg is None:
11240                                 return bool(state_change)
11241
11242                         state_change += 1
11243
11244                         if not pkg.installed:
11245                                 self._pkg_count.curval += 1
11246
11247                         task = self._task(pkg)
11248
11249                         if pkg.installed:
11250                                 merge = PackageMerge(merge=task)
11251                                 merge.addExitListener(self._merge_exit)
11252                                 self._task_queues.merge.add(merge)
11253
11254                         elif pkg.built:
11255                                 self._jobs += 1
11256                                 self._previous_job_start_time = time.time()
11257                                 self._status_display.running = self._jobs
11258                                 task.addExitListener(self._extract_exit)
11259                                 self._task_queues.jobs.add(task)
11260
11261                         else:
11262                                 self._jobs += 1
11263                                 self._previous_job_start_time = time.time()
11264                                 self._status_display.running = self._jobs
11265                                 task.addExitListener(self._build_exit)
11266                                 self._task_queues.jobs.add(task)
11267
11268                 return bool(state_change)
11269
11270         def _task(self, pkg):
11271
11272                 pkg_to_replace = None
11273                 if pkg.operation != "uninstall":
11274                         vardb = pkg.root_config.trees["vartree"].dbapi
11275                         previous_cpv = vardb.match(pkg.slot_atom)
11276                         if previous_cpv:
11277                                 previous_cpv = previous_cpv.pop()
11278                                 pkg_to_replace = self._pkg(previous_cpv,
11279                                         "installed", pkg.root_config, installed=True)
11280
11281                 task = MergeListItem(args_set=self._args_set,
11282                         background=self._background, binpkg_opts=self._binpkg_opts,
11283                         build_opts=self._build_opts,
11284                         config_pool=self._ConfigPool(pkg.root,
11285                         self._allocate_config, self._deallocate_config),
11286                         emerge_opts=self.myopts,
11287                         find_blockers=self._find_blockers(pkg), logger=self._logger,
11288                         mtimedb=self._mtimedb, pkg=pkg, pkg_count=self._pkg_count.copy(),
11289                         pkg_to_replace=pkg_to_replace,
11290                         prefetcher=self._prefetchers.get(pkg),
11291                         scheduler=self._sched_iface,
11292                         settings=self._allocate_config(pkg.root),
11293                         statusMessage=self._status_msg,
11294                         world_atom=self._world_atom)
11295
11296                 return task
11297
11298         def _failed_pkg_msg(self, failed_pkg, action, preposition):
11299                 pkg = failed_pkg.pkg
11300                 msg = "%s to %s %s" % \
11301                         (bad("Failed"), action, colorize("INFORM", pkg.cpv))
11302                 if pkg.root != "/":
11303                         msg += " %s %s" % (preposition, pkg.root)
11304
11305                 log_path = self._locate_failure_log(failed_pkg)
11306                 if log_path is not None:
11307                         msg += ", Log file:"
11308                 self._status_msg(msg)
11309
11310                 if log_path is not None:
11311                         self._status_msg(" '%s'" % (colorize("INFORM", log_path),))
11312
11313         def _status_msg(self, msg):
11314                 """
11315                 Display a brief status message (no newlines) in the status display.
11316                 This is called by tasks to provide feedback to the user. This
11317                 delegates the resposibility of generating \r and \n control characters,
11318                 to guarantee that lines are created or erased when necessary and
11319                 appropriate.
11320
11321                 @type msg: str
11322                 @param msg: a brief status message (no newlines allowed)
11323                 """
11324                 if not self._background:
11325                         writemsg_level("\n")
11326                 self._status_display.displayMessage(msg)
11327
11328         def _save_resume_list(self):
11329                 """
11330                 Do this before verifying the ebuild Manifests since it might
11331                 be possible for the user to use --resume --skipfirst get past
11332                 a non-essential package with a broken digest.
11333                 """
11334                 mtimedb = self._mtimedb
11335                 mtimedb["resume"]["mergelist"] = [list(x) \
11336                         for x in self._mergelist \
11337                         if isinstance(x, Package) and x.operation == "merge"]
11338
11339                 mtimedb.commit()
11340
11341         def _calc_resume_list(self):
11342                 """
11343                 Use the current resume list to calculate a new one,
11344                 dropping any packages with unsatisfied deps.
11345                 @rtype: bool
11346                 @returns: True if successful, False otherwise.
11347                 """
11348                 print colorize("GOOD", "*** Resuming merge...")
11349
11350                 if self._show_list():
11351                         if "--tree" in self.myopts:
11352                                 portage.writemsg_stdout("\n" + \
11353                                         darkgreen("These are the packages that " + \
11354                                         "would be merged, in reverse order:\n\n"))
11355
11356                         else:
11357                                 portage.writemsg_stdout("\n" + \
11358                                         darkgreen("These are the packages that " + \
11359                                         "would be merged, in order:\n\n"))
11360
11361                 show_spinner = "--quiet" not in self.myopts and \
11362                         "--nodeps" not in self.myopts
11363
11364                 if show_spinner:
11365                         print "Calculating dependencies  ",
11366
11367                 myparams = create_depgraph_params(self.myopts, None)
11368                 success = False
11369                 e = None
11370                 try:
11371                         success, mydepgraph, dropped_tasks = resume_depgraph(
11372                                 self.settings, self.trees, self._mtimedb, self.myopts,
11373                                 myparams, self._spinner)
11374                 except depgraph.UnsatisfiedResumeDep, exc:
11375                         # rename variable to avoid python-3.0 error:
11376                         # SyntaxError: can not delete variable 'e' referenced in nested
11377                         #              scope
11378                         e = exc
11379                         mydepgraph = e.depgraph
11380                         dropped_tasks = set()
11381
11382                 if show_spinner:
11383                         print "\b\b... done!"
11384
11385                 if e is not None:
11386                         def unsatisfied_resume_dep_msg():
11387                                 mydepgraph.display_problems()
11388                                 out = portage.output.EOutput()
11389                                 out.eerror("One or more packages are either masked or " + \
11390                                         "have missing dependencies:")
11391                                 out.eerror("")
11392                                 indent = "  "
11393                                 show_parents = set()
11394                                 for dep in e.value:
11395                                         if dep.parent in show_parents:
11396                                                 continue
11397                                         show_parents.add(dep.parent)
11398                                         if dep.atom is None:
11399                                                 out.eerror(indent + "Masked package:")
11400                                                 out.eerror(2 * indent + str(dep.parent))
11401                                                 out.eerror("")
11402                                         else:
11403                                                 out.eerror(indent + str(dep.atom) + " pulled in by:")
11404                                                 out.eerror(2 * indent + str(dep.parent))
11405                                                 out.eerror("")
11406                                 msg = "The resume list contains packages " + \
11407                                         "that are either masked or have " + \
11408                                         "unsatisfied dependencies. " + \
11409                                         "Please restart/continue " + \
11410                                         "the operation manually, or use --skipfirst " + \
11411                                         "to skip the first package in the list and " + \
11412                                         "any other packages that may be " + \
11413                                         "masked or have missing dependencies."
11414                                 for line in textwrap.wrap(msg, 72):
11415                                         out.eerror(line)
11416                         self._post_mod_echo_msgs.append(unsatisfied_resume_dep_msg)
11417                         return False
11418
11419                 if success and self._show_list():
11420                         mylist = mydepgraph.altlist()
11421                         if mylist:
11422                                 if "--tree" in self.myopts:
11423                                         mylist.reverse()
11424                                 mydepgraph.display(mylist, favorites=self._favorites)
11425
11426                 if not success:
11427                         self._post_mod_echo_msgs.append(mydepgraph.display_problems)
11428                         return False
11429                 mydepgraph.display_problems()
11430
11431                 mylist = mydepgraph.altlist()
11432                 mydepgraph.break_refs(mylist)
11433                 mydepgraph.break_refs(dropped_tasks)
11434                 self._mergelist = mylist
11435                 self._set_digraph(mydepgraph.schedulerGraph())
11436
11437                 msg_width = 75
11438                 for task in dropped_tasks:
11439                         if not (isinstance(task, Package) and task.operation == "merge"):
11440                                 continue
11441                         pkg = task
11442                         msg = "emerge --keep-going:" + \
11443                                 " %s" % (pkg.cpv,)
11444                         if pkg.root != "/":
11445                                 msg += " for %s" % (pkg.root,)
11446                         msg += " dropped due to unsatisfied dependency."
11447                         for line in textwrap.wrap(msg, msg_width):
11448                                 eerror(line, phase="other", key=pkg.cpv)
11449                         settings = self.pkgsettings[pkg.root]
11450                         # Ensure that log collection from $T is disabled inside
11451                         # elog_process(), since any logs that might exist are
11452                         # not valid here.
11453                         settings.pop("T", None)
11454                         portage.elog.elog_process(pkg.cpv, settings)
11455                         self._failed_pkgs_all.append(self._failed_pkg(pkg=pkg))
11456
11457                 return True
11458
11459         def _show_list(self):
11460                 myopts = self.myopts
11461                 if "--quiet" not in myopts and \
11462                         ("--ask" in myopts or "--tree" in myopts or \
11463                         "--verbose" in myopts):
11464                         return True
11465                 return False
11466
11467         def _world_atom(self, pkg):
11468                 """
11469                 Add the package to the world file, but only if
11470                 it's supposed to be added. Otherwise, do nothing.
11471                 """
11472
11473                 if set(("--buildpkgonly", "--fetchonly",
11474                         "--fetch-all-uri",
11475                         "--oneshot", "--onlydeps",
11476                         "--pretend")).intersection(self.myopts):
11477                         return
11478
11479                 if pkg.root != self.target_root:
11480                         return
11481
11482                 args_set = self._args_set
11483                 if not args_set.findAtomForPackage(pkg):
11484                         return
11485
11486                 logger = self._logger
11487                 pkg_count = self._pkg_count
11488                 root_config = pkg.root_config
11489                 world_set = root_config.sets["world"]
11490                 world_locked = False
11491                 if hasattr(world_set, "lock"):
11492                         world_set.lock()
11493                         world_locked = True
11494
11495                 try:
11496                         if hasattr(world_set, "load"):
11497                                 world_set.load() # maybe it's changed on disk
11498
11499                         atom = create_world_atom(pkg, args_set, root_config)
11500                         if atom:
11501                                 if hasattr(world_set, "add"):
11502                                         self._status_msg(('Recording %s in "world" ' + \
11503                                                 'favorites file...') % atom)
11504                                         logger.log(" === (%s of %s) Updating world file (%s)" % \
11505                                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv))
11506                                         world_set.add(atom)
11507                                 else:
11508                                         writemsg_level('\n!!! Unable to record %s in "world"\n' % \
11509                                                 (atom,), level=logging.WARN, noiselevel=-1)
11510                 finally:
11511                         if world_locked:
11512                                 world_set.unlock()
11513
11514         def _pkg(self, cpv, type_name, root_config, installed=False):
11515                 """
11516                 Get a package instance from the cache, or create a new
11517                 one if necessary. Raises KeyError from aux_get if it
11518                 failures for some reason (package does not exist or is
11519                 corrupt).
11520                 """
11521                 operation = "merge"
11522                 if installed:
11523                         operation = "nomerge"
11524
11525                 if self._digraph is not None:
11526                         # Reuse existing instance when available.
11527                         pkg = self._digraph.get(
11528                                 (type_name, root_config.root, cpv, operation))
11529                         if pkg is not None:
11530                                 return pkg
11531
11532                 tree_type = depgraph.pkg_tree_map[type_name]
11533                 db = root_config.trees[tree_type].dbapi
11534                 db_keys = list(self.trees[root_config.root][
11535                         tree_type].dbapi._aux_cache_keys)
11536                 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
11537                 pkg = Package(cpv=cpv, metadata=metadata,
11538                         root_config=root_config, installed=installed)
11539                 if type_name == "ebuild":
11540                         settings = self.pkgsettings[root_config.root]
11541                         settings.setcpv(pkg)
11542                         pkg.metadata["USE"] = settings["PORTAGE_USE"]
11543                         pkg.metadata['CHOST'] = settings.get('CHOST', '')
11544
11545                 return pkg
11546
11547 class MetadataRegen(PollScheduler):
11548
11549         def __init__(self, portdb, max_jobs=None, max_load=None):
11550                 PollScheduler.__init__(self)
11551                 self._portdb = portdb
11552
11553                 if max_jobs is None:
11554                         max_jobs = 1
11555
11556                 self._max_jobs = max_jobs
11557                 self._max_load = max_load
11558                 self._sched_iface = self._sched_iface_class(
11559                         register=self._register,
11560                         schedule=self._schedule_wait,
11561                         unregister=self._unregister)
11562
11563                 self._valid_pkgs = set()
11564                 self._process_iter = self._iter_metadata_processes()
11565                 self.returncode = os.EX_OK
11566                 self._error_count = 0
11567
11568         def _iter_metadata_processes(self):
11569                 portdb = self._portdb
11570                 valid_pkgs = self._valid_pkgs
11571                 every_cp = portdb.cp_all()
11572                 every_cp.sort(reverse=True)
11573
11574                 while every_cp:
11575                         cp = every_cp.pop()
11576                         portage.writemsg_stdout("Processing %s\n" % cp)
11577                         cpv_list = portdb.cp_list(cp)
11578                         for cpv in cpv_list:
11579                                 valid_pkgs.add(cpv)
11580                                 ebuild_path, repo_path = portdb.findname2(cpv)
11581                                 metadata_process = portdb._metadata_process(
11582                                         cpv, ebuild_path, repo_path)
11583                                 if metadata_process is None:
11584                                         continue
11585                                 yield metadata_process
11586
11587         def run(self):
11588
11589                 portdb = self._portdb
11590                 from portage.cache.cache_errors import CacheError
11591                 dead_nodes = {}
11592
11593                 for mytree in portdb.porttrees:
11594                         try:
11595                                 dead_nodes[mytree] = set(portdb.auxdb[mytree].iterkeys())
11596                         except CacheError, e:
11597                                 portage.writemsg("Error listing cache entries for " + \
11598                                         "'%s': %s, continuing...\n" % (mytree, e), noiselevel=-1)
11599                                 del e
11600                                 dead_nodes = None
11601                                 break
11602
11603                 while self._schedule():
11604                         self._poll_loop()
11605
11606                 while self._jobs:
11607                         self._poll_loop()
11608
11609                 if dead_nodes:
11610                         for y in self._valid_pkgs:
11611                                 for mytree in portdb.porttrees:
11612                                         if portdb.findname2(y, mytree=mytree)[0]:
11613                                                 dead_nodes[mytree].discard(y)
11614
11615                         for mytree, nodes in dead_nodes.iteritems():
11616                                 auxdb = portdb.auxdb[mytree]
11617                                 for y in nodes:
11618                                         try:
11619                                                 del auxdb[y]
11620                                         except (KeyError, CacheError):
11621                                                 pass
11622
11623         def _schedule_tasks(self):
11624                 """
11625                 @rtype: bool
11626                 @returns: True if there may be remaining tasks to schedule,
11627                         False otherwise.
11628                 """
11629                 while self._can_add_job():
11630                         try:
11631                                 metadata_process = self._process_iter.next()
11632                         except StopIteration:
11633                                 return False
11634
11635                         self._jobs += 1
11636                         metadata_process.scheduler = self._sched_iface
11637                         metadata_process.addExitListener(self._metadata_exit)
11638                         metadata_process.start()
11639                 return True
11640
11641         def _metadata_exit(self, metadata_process):
11642                 self._jobs -= 1
11643                 if metadata_process.returncode != os.EX_OK:
11644                         self.returncode = 1
11645                         self._error_count += 1
11646                         self._valid_pkgs.discard(metadata_process.cpv)
11647                         portage.writemsg("Error processing %s, continuing...\n" % \
11648                                 (metadata_process.cpv,))
11649                 self._schedule()
11650
11651 class UninstallFailure(portage.exception.PortageException):
11652         """
11653         An instance of this class is raised by unmerge() when
11654         an uninstallation fails.
11655         """
11656         status = 1
11657         def __init__(self, *pargs):
11658                 portage.exception.PortageException.__init__(self, pargs)
11659                 if pargs:
11660                         self.status = pargs[0]
11661
11662 def unmerge(root_config, myopts, unmerge_action,
11663         unmerge_files, ldpath_mtimes, autoclean=0,
11664         clean_world=1, clean_delay=1, ordered=0, raise_on_error=0,
11665         scheduler=None, writemsg_level=portage.util.writemsg_level):
11666
11667         quiet = "--quiet" in myopts
11668         settings = root_config.settings
11669         sets = root_config.sets
11670         vartree = root_config.trees["vartree"]
11671         candidate_catpkgs=[]
11672         global_unmerge=0
11673         xterm_titles = "notitles" not in settings.features
11674         out = portage.output.EOutput()
11675         pkg_cache = {}
11676         db_keys = list(vartree.dbapi._aux_cache_keys)
11677
11678         def _pkg(cpv):
11679                 pkg = pkg_cache.get(cpv)
11680                 if pkg is None:
11681                         pkg = Package(cpv=cpv, installed=True,
11682                                 metadata=izip(db_keys, vartree.dbapi.aux_get(cpv, db_keys)),
11683                                 root_config=root_config,
11684                                 type_name="installed")
11685                         pkg_cache[cpv] = pkg
11686                 return pkg
11687
11688         vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
11689         try:
11690                 # At least the parent needs to exist for the lock file.
11691                 portage.util.ensure_dirs(vdb_path)
11692         except portage.exception.PortageException:
11693                 pass
11694         vdb_lock = None
11695         try:
11696                 if os.access(vdb_path, os.W_OK):
11697                         vdb_lock = portage.locks.lockdir(vdb_path)
11698                 realsyslist = sets["system"].getAtoms()
11699                 syslist = []
11700                 for x in realsyslist:
11701                         mycp = portage.dep_getkey(x)
11702                         if mycp in settings.getvirtuals():
11703                                 providers = []
11704                                 for provider in settings.getvirtuals()[mycp]:
11705                                         if vartree.dbapi.match(provider):
11706                                                 providers.append(provider)
11707                                 if len(providers) == 1:
11708                                         syslist.extend(providers)
11709                         else:
11710                                 syslist.append(mycp)
11711         
11712                 mysettings = portage.config(clone=settings)
11713         
11714                 if not unmerge_files:
11715                         if unmerge_action == "unmerge":
11716                                 print
11717                                 print bold("emerge unmerge") + " can only be used with specific package names"
11718                                 print
11719                                 return 0
11720                         else:
11721                                 global_unmerge = 1
11722         
11723                 localtree = vartree
11724                 # process all arguments and add all
11725                 # valid db entries to candidate_catpkgs
11726                 if global_unmerge:
11727                         if not unmerge_files:
11728                                 candidate_catpkgs.extend(vartree.dbapi.cp_all())
11729                 else:
11730                         #we've got command-line arguments
11731                         if not unmerge_files:
11732                                 print "\nNo packages to unmerge have been provided.\n"
11733                                 return 0
11734                         for x in unmerge_files:
11735                                 arg_parts = x.split('/')
11736                                 if x[0] not in [".","/"] and \
11737                                         arg_parts[-1][-7:] != ".ebuild":
11738                                         #possible cat/pkg or dep; treat as such
11739                                         candidate_catpkgs.append(x)
11740                                 elif unmerge_action in ["prune","clean"]:
11741                                         print "\n!!! Prune and clean do not accept individual" + \
11742                                                 " ebuilds as arguments;\n    skipping.\n"
11743                                         continue
11744                                 else:
11745                                         # it appears that the user is specifying an installed
11746                                         # ebuild and we're in "unmerge" mode, so it's ok.
11747                                         if not os.path.exists(x):
11748                                                 print "\n!!! The path '"+x+"' doesn't exist.\n"
11749                                                 return 0
11750         
11751                                         absx   = os.path.abspath(x)
11752                                         sp_absx = absx.split("/")
11753                                         if sp_absx[-1][-7:] == ".ebuild":
11754                                                 del sp_absx[-1]
11755                                                 absx = "/".join(sp_absx)
11756         
11757                                         sp_absx_len = len(sp_absx)
11758         
11759                                         vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
11760                                         vdb_len  = len(vdb_path)
11761         
11762                                         sp_vdb     = vdb_path.split("/")
11763                                         sp_vdb_len = len(sp_vdb)
11764         
11765                                         if not os.path.exists(absx+"/CONTENTS"):
11766                                                 print "!!! Not a valid db dir: "+str(absx)
11767                                                 return 0
11768         
11769                                         if sp_absx_len <= sp_vdb_len:
11770                                                 # The Path is shorter... so it can't be inside the vdb.
11771                                                 print sp_absx
11772                                                 print absx
11773                                                 print "\n!!!",x,"cannot be inside "+ \
11774                                                         vdb_path+"; aborting.\n"
11775                                                 return 0
11776         
11777                                         for idx in range(0,sp_vdb_len):
11778                                                 if idx >= sp_absx_len or sp_vdb[idx] != sp_absx[idx]:
11779                                                         print sp_absx
11780                                                         print absx
11781                                                         print "\n!!!", x, "is not inside "+\
11782                                                                 vdb_path+"; aborting.\n"
11783                                                         return 0
11784         
11785                                         print "="+"/".join(sp_absx[sp_vdb_len:])
11786                                         candidate_catpkgs.append(
11787                                                 "="+"/".join(sp_absx[sp_vdb_len:]))
11788         
11789                 newline=""
11790                 if (not "--quiet" in myopts):
11791                         newline="\n"
11792                 if settings["ROOT"] != "/":
11793                         writemsg_level(darkgreen(newline+ \
11794                                 ">>> Using system located in ROOT tree %s\n" % \
11795                                 settings["ROOT"]))
11796
11797                 if (("--pretend" in myopts) or ("--ask" in myopts)) and \
11798                         not ("--quiet" in myopts):
11799                         writemsg_level(darkgreen(newline+\
11800                                 ">>> These are the packages that would be unmerged:\n"))
11801
11802                 # Preservation of order is required for --depclean and --prune so
11803                 # that dependencies are respected. Use all_selected to eliminate
11804                 # duplicate packages since the same package may be selected by
11805                 # multiple atoms.
11806                 pkgmap = []
11807                 all_selected = set()
11808                 for x in candidate_catpkgs:
11809                         # cycle through all our candidate deps and determine
11810                         # what will and will not get unmerged
11811                         try:
11812                                 mymatch = vartree.dbapi.match(x)
11813                         except portage.exception.AmbiguousPackageName, errpkgs:
11814                                 print "\n\n!!! The short ebuild name \"" + \
11815                                         x + "\" is ambiguous.  Please specify"
11816                                 print "!!! one of the following fully-qualified " + \
11817                                         "ebuild names instead:\n"
11818                                 for i in errpkgs[0]:
11819                                         print "    " + green(i)
11820                                 print
11821                                 sys.exit(1)
11822         
11823                         if not mymatch and x[0] not in "<>=~":
11824                                 mymatch = localtree.dep_match(x)
11825                         if not mymatch:
11826                                 portage.writemsg("\n--- Couldn't find '%s' to %s.\n" % \
11827                                         (x, unmerge_action), noiselevel=-1)
11828                                 continue
11829
11830                         pkgmap.append(
11831                                 {"protected": set(), "selected": set(), "omitted": set()})
11832                         mykey = len(pkgmap) - 1
11833                         if unmerge_action=="unmerge":
11834                                         for y in mymatch:
11835                                                 if y not in all_selected:
11836                                                         pkgmap[mykey]["selected"].add(y)
11837                                                         all_selected.add(y)
11838                         elif unmerge_action == "prune":
11839                                 if len(mymatch) == 1:
11840                                         continue
11841                                 best_version = mymatch[0]
11842                                 best_slot = vartree.getslot(best_version)
11843                                 best_counter = vartree.dbapi.cpv_counter(best_version)
11844                                 for mypkg in mymatch[1:]:
11845                                         myslot = vartree.getslot(mypkg)
11846                                         mycounter = vartree.dbapi.cpv_counter(mypkg)
11847                                         if (myslot == best_slot and mycounter > best_counter) or \
11848                                                 mypkg == portage.best([mypkg, best_version]):
11849                                                 if myslot == best_slot:
11850                                                         if mycounter < best_counter:
11851                                                                 # On slot collision, keep the one with the
11852                                                                 # highest counter since it is the most
11853                                                                 # recently installed.
11854                                                                 continue
11855                                                 best_version = mypkg
11856                                                 best_slot = myslot
11857                                                 best_counter = mycounter
11858                                 pkgmap[mykey]["protected"].add(best_version)
11859                                 pkgmap[mykey]["selected"].update(mypkg for mypkg in mymatch \
11860                                         if mypkg != best_version and mypkg not in all_selected)
11861                                 all_selected.update(pkgmap[mykey]["selected"])
11862                         else:
11863                                 # unmerge_action == "clean"
11864                                 slotmap={}
11865                                 for mypkg in mymatch:
11866                                         if unmerge_action == "clean":
11867                                                 myslot = localtree.getslot(mypkg)
11868                                         else:
11869                                                 # since we're pruning, we don't care about slots
11870                                                 # and put all the pkgs in together
11871                                                 myslot = 0
11872                                         if myslot not in slotmap:
11873                                                 slotmap[myslot] = {}
11874                                         slotmap[myslot][localtree.dbapi.cpv_counter(mypkg)] = mypkg
11875
11876                                 for mypkg in vartree.dbapi.cp_list(
11877                                         portage.dep_getkey(mymatch[0])):
11878                                         myslot = vartree.getslot(mypkg)
11879                                         if myslot not in slotmap:
11880                                                 slotmap[myslot] = {}
11881                                         slotmap[myslot][vartree.dbapi.cpv_counter(mypkg)] = mypkg
11882
11883                                 for myslot in slotmap:
11884                                         counterkeys = slotmap[myslot].keys()
11885                                         if not counterkeys:
11886                                                 continue
11887                                         counterkeys.sort()
11888                                         pkgmap[mykey]["protected"].add(
11889                                                 slotmap[myslot][counterkeys[-1]])
11890                                         del counterkeys[-1]
11891
11892                                         for counter in counterkeys[:]:
11893                                                 mypkg = slotmap[myslot][counter]
11894                                                 if mypkg not in mymatch:
11895                                                         counterkeys.remove(counter)
11896                                                         pkgmap[mykey]["protected"].add(
11897                                                                 slotmap[myslot][counter])
11898
11899                                         #be pretty and get them in order of merge:
11900                                         for ckey in counterkeys:
11901                                                 mypkg = slotmap[myslot][ckey]
11902                                                 if mypkg not in all_selected:
11903                                                         pkgmap[mykey]["selected"].add(mypkg)
11904                                                         all_selected.add(mypkg)
11905                                         # ok, now the last-merged package
11906                                         # is protected, and the rest are selected
11907                 numselected = len(all_selected)
11908                 if global_unmerge and not numselected:
11909                         portage.writemsg_stdout("\n>>> No outdated packages were found on your system.\n")
11910                         return 0
11911         
11912                 if not numselected:
11913                         portage.writemsg_stdout(
11914                                 "\n>>> No packages selected for removal by " + \
11915                                 unmerge_action + "\n")
11916                         return 0
11917         finally:
11918                 if vdb_lock:
11919                         vartree.dbapi.flush_cache()
11920                         portage.locks.unlockdir(vdb_lock)
11921         
11922         from portage.sets.base import EditablePackageSet
11923         
11924         # generate a list of package sets that are directly or indirectly listed in "world",
11925         # as there is no persistent list of "installed" sets
11926         installed_sets = ["world"]
11927         stop = False
11928         pos = 0
11929         while not stop:
11930                 stop = True
11931                 pos = len(installed_sets)
11932                 for s in installed_sets[pos - 1:]:
11933                         if s not in sets:
11934                                 continue
11935                         candidates = [x[len(SETPREFIX):] for x in sets[s].getNonAtoms() if x.startswith(SETPREFIX)]
11936                         if candidates:
11937                                 stop = False
11938                                 installed_sets += candidates
11939         installed_sets = [x for x in installed_sets if x not in root_config.setconfig.active]
11940         del stop, pos
11941
11942         # we don't want to unmerge packages that are still listed in user-editable package sets
11943         # listed in "world" as they would be remerged on the next update of "world" or the 
11944         # relevant package sets.
11945         unknown_sets = set()
11946         for cp in xrange(len(pkgmap)):
11947                 for cpv in pkgmap[cp]["selected"].copy():
11948                         try:
11949                                 pkg = _pkg(cpv)
11950                         except KeyError:
11951                                 # It could have been uninstalled
11952                                 # by a concurrent process.
11953                                 continue
11954
11955                         if unmerge_action != "clean" and \
11956                                 root_config.root == "/" and \
11957                                 portage.match_from_list(
11958                                 portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
11959                                 msg = ("Not unmerging package %s since there is no valid " + \
11960                                 "reason for portage to unmerge itself.") % (pkg.cpv,)
11961                                 for line in textwrap.wrap(msg, 75):
11962                                         out.eerror(line)
11963                                 # adjust pkgmap so the display output is correct
11964                                 pkgmap[cp]["selected"].remove(cpv)
11965                                 all_selected.remove(cpv)
11966                                 pkgmap[cp]["protected"].add(cpv)
11967                                 continue
11968
11969                         parents = []
11970                         for s in installed_sets:
11971                                 # skip sets that the user requested to unmerge, and skip world 
11972                                 # unless we're unmerging a package set (as the package would be 
11973                                 # removed from "world" later on)
11974                                 if s in root_config.setconfig.active or (s == "world" and not root_config.setconfig.active):
11975                                         continue
11976
11977                                 if s not in sets:
11978                                         if s in unknown_sets:
11979                                                 continue
11980                                         unknown_sets.add(s)
11981                                         out = portage.output.EOutput()
11982                                         out.eerror(("Unknown set '@%s' in " + \
11983                                                 "%svar/lib/portage/world_sets") % \
11984                                                 (s, root_config.root))
11985                                         continue
11986
11987                                 # only check instances of EditablePackageSet as other classes are generally used for
11988                                 # special purposes and can be ignored here (and are usually generated dynamically, so the
11989                                 # user can't do much about them anyway)
11990                                 if isinstance(sets[s], EditablePackageSet):
11991
11992                                         # This is derived from a snippet of code in the
11993                                         # depgraph._iter_atoms_for_pkg() method.
11994                                         for atom in sets[s].iterAtomsForPackage(pkg):
11995                                                 inst_matches = vartree.dbapi.match(atom)
11996                                                 inst_matches.reverse() # descending order
11997                                                 higher_slot = None
11998                                                 for inst_cpv in inst_matches:
11999                                                         try:
12000                                                                 inst_pkg = _pkg(inst_cpv)
12001                                                         except KeyError:
12002                                                                 # It could have been uninstalled
12003                                                                 # by a concurrent process.
12004                                                                 continue
12005
12006                                                         if inst_pkg.cp != atom.cp:
12007                                                                 continue
12008                                                         if pkg >= inst_pkg:
12009                                                                 # This is descending order, and we're not
12010                                                                 # interested in any versions <= pkg given.
12011                                                                 break
12012                                                         if pkg.slot_atom != inst_pkg.slot_atom:
12013                                                                 higher_slot = inst_pkg
12014                                                                 break
12015                                                 if higher_slot is None:
12016                                                         parents.append(s)
12017                                                         break
12018                         if parents:
12019                                 #print colorize("WARN", "Package %s is going to be unmerged," % cpv)
12020                                 #print colorize("WARN", "but still listed in the following package sets:")
12021                                 #print "    %s\n" % ", ".join(parents)
12022                                 print colorize("WARN", "Not unmerging package %s as it is" % cpv)
12023                                 print colorize("WARN", "still referenced by the following package sets:")
12024                                 print "    %s\n" % ", ".join(parents)
12025                                 # adjust pkgmap so the display output is correct
12026                                 pkgmap[cp]["selected"].remove(cpv)
12027                                 all_selected.remove(cpv)
12028                                 pkgmap[cp]["protected"].add(cpv)
12029         
12030         del installed_sets
12031
12032         numselected = len(all_selected)
12033         if not numselected:
12034                 writemsg_level(
12035                         "\n>>> No packages selected for removal by " + \
12036                         unmerge_action + "\n")
12037                 return 0
12038
12039         # Unmerge order only matters in some cases
12040         if not ordered:
12041                 unordered = {}
12042                 for d in pkgmap:
12043                         selected = d["selected"]
12044                         if not selected:
12045                                 continue
12046                         cp = portage.cpv_getkey(iter(selected).next())
12047                         cp_dict = unordered.get(cp)
12048                         if cp_dict is None:
12049                                 cp_dict = {}
12050                                 unordered[cp] = cp_dict
12051                                 for k in d:
12052                                         cp_dict[k] = set()
12053                         for k, v in d.iteritems():
12054                                 cp_dict[k].update(v)
12055                 pkgmap = [unordered[cp] for cp in sorted(unordered)]
12056
12057         for x in xrange(len(pkgmap)):
12058                 selected = pkgmap[x]["selected"]
12059                 if not selected:
12060                         continue
12061                 for mytype, mylist in pkgmap[x].iteritems():
12062                         if mytype == "selected":
12063                                 continue
12064                         mylist.difference_update(all_selected)
12065                 cp = portage.cpv_getkey(iter(selected).next())
12066                 for y in localtree.dep_match(cp):
12067                         if y not in pkgmap[x]["omitted"] and \
12068                                 y not in pkgmap[x]["selected"] and \
12069                                 y not in pkgmap[x]["protected"] and \
12070                                 y not in all_selected:
12071                                 pkgmap[x]["omitted"].add(y)
12072                 if global_unmerge and not pkgmap[x]["selected"]:
12073                         #avoid cluttering the preview printout with stuff that isn't getting unmerged
12074                         continue
12075                 if not (pkgmap[x]["protected"] or pkgmap[x]["omitted"]) and cp in syslist:
12076                         writemsg_level(colorize("BAD","\a\n\n!!! " + \
12077                                 "'%s' is part of your system profile.\n" % cp),
12078                                 level=logging.WARNING, noiselevel=-1)
12079                         writemsg_level(colorize("WARN","\a!!! Unmerging it may " + \
12080                                 "be damaging to your system.\n\n"),
12081                                 level=logging.WARNING, noiselevel=-1)
12082                         if clean_delay and "--pretend" not in myopts and "--ask" not in myopts:
12083                                 countdown(int(settings["EMERGE_WARNING_DELAY"]),
12084                                         colorize("UNMERGE_WARN", "Press Ctrl-C to Stop"))
12085                 if not quiet:
12086                         writemsg_level("\n %s\n" % (bold(cp),), noiselevel=-1)
12087                 else:
12088                         writemsg_level(bold(cp) + ": ", noiselevel=-1)
12089                 for mytype in ["selected","protected","omitted"]:
12090                         if not quiet:
12091                                 writemsg_level((mytype + ": ").rjust(14), noiselevel=-1)
12092                         if pkgmap[x][mytype]:
12093                                 sorted_pkgs = [portage.catpkgsplit(mypkg)[1:] for mypkg in pkgmap[x][mytype]]
12094                                 sorted_pkgs.sort(key=cmp_sort_key(portage.pkgcmp))
12095                                 for pn, ver, rev in sorted_pkgs:
12096                                         if rev == "r0":
12097                                                 myversion = ver
12098                                         else:
12099                                                 myversion = ver + "-" + rev
12100                                         if mytype == "selected":
12101                                                 writemsg_level(
12102                                                         colorize("UNMERGE_WARN", myversion + " "),
12103                                                         noiselevel=-1)
12104                                         else:
12105                                                 writemsg_level(
12106                                                         colorize("GOOD", myversion + " "), noiselevel=-1)
12107                         else:
12108                                 writemsg_level("none ", noiselevel=-1)
12109                         if not quiet:
12110                                 writemsg_level("\n", noiselevel=-1)
12111                 if quiet:
12112                         writemsg_level("\n", noiselevel=-1)
12113
12114         writemsg_level("\n>>> " + colorize("UNMERGE_WARN", "'Selected'") + \
12115                 " packages are slated for removal.\n")
12116         writemsg_level(">>> " + colorize("GOOD", "'Protected'") + \
12117                         " and " + colorize("GOOD", "'omitted'") + \
12118                         " packages will not be removed.\n\n")
12119
12120         if "--pretend" in myopts:
12121                 #we're done... return
12122                 return 0
12123         if "--ask" in myopts:
12124                 if userquery("Would you like to unmerge these packages?")=="No":
12125                         # enter pretend mode for correct formatting of results
12126                         myopts["--pretend"] = True
12127                         print
12128                         print "Quitting."
12129                         print
12130                         return 0
12131         #the real unmerging begins, after a short delay....
12132         if clean_delay and not autoclean:
12133                 countdown(int(settings["CLEAN_DELAY"]), ">>> Unmerging")
12134
12135         for x in xrange(len(pkgmap)):
12136                 for y in pkgmap[x]["selected"]:
12137                         writemsg_level(">>> Unmerging "+y+"...\n", noiselevel=-1)
12138                         emergelog(xterm_titles, "=== Unmerging... ("+y+")")
12139                         mysplit = y.split("/")
12140                         #unmerge...
12141                         retval = portage.unmerge(mysplit[0], mysplit[1], settings["ROOT"],
12142                                 mysettings, unmerge_action not in ["clean","prune"],
12143                                 vartree=vartree, ldpath_mtimes=ldpath_mtimes,
12144                                 scheduler=scheduler)
12145
12146                         if retval != os.EX_OK:
12147                                 emergelog(xterm_titles, " !!! unmerge FAILURE: "+y)
12148                                 if raise_on_error:
12149                                         raise UninstallFailure(retval)
12150                                 sys.exit(retval)
12151                         else:
12152                                 if clean_world and hasattr(sets["world"], "cleanPackage"):
12153                                         sets["world"].cleanPackage(vartree.dbapi, y)
12154                                 emergelog(xterm_titles, " >>> unmerge success: "+y)
12155         if clean_world and hasattr(sets["world"], "remove"):
12156                 for s in root_config.setconfig.active:
12157                         sets["world"].remove(SETPREFIX+s)
12158         return 1
12159
12160 def chk_updated_info_files(root, infodirs, prev_mtimes, retval):
12161
12162         if os.path.exists("/usr/bin/install-info"):
12163                 out = portage.output.EOutput()
12164                 regen_infodirs=[]
12165                 for z in infodirs:
12166                         if z=='':
12167                                 continue
12168                         inforoot=normpath(root+z)
12169                         if os.path.isdir(inforoot):
12170                                 infomtime = long(os.stat(inforoot).st_mtime)
12171                                 if inforoot not in prev_mtimes or \
12172                                         prev_mtimes[inforoot] != infomtime:
12173                                                 regen_infodirs.append(inforoot)
12174
12175                 if not regen_infodirs:
12176                         portage.writemsg_stdout("\n")
12177                         out.einfo("GNU info directory index is up-to-date.")
12178                 else:
12179                         portage.writemsg_stdout("\n")
12180                         out.einfo("Regenerating GNU info directory index...")
12181
12182                         dir_extensions = ("", ".gz", ".bz2")
12183                         icount=0
12184                         badcount=0
12185                         errmsg = ""
12186                         for inforoot in regen_infodirs:
12187                                 if inforoot=='':
12188                                         continue
12189
12190                                 if not os.path.isdir(inforoot) or \
12191                                         not os.access(inforoot, os.W_OK):
12192                                         continue
12193
12194                                 file_list = os.listdir(inforoot)
12195                                 file_list.sort()
12196                                 dir_file = os.path.join(inforoot, "dir")
12197                                 moved_old_dir = False
12198                                 processed_count = 0
12199                                 for x in file_list:
12200                                         if x.startswith(".") or \
12201                                                 os.path.isdir(os.path.join(inforoot, x)):
12202                                                 continue
12203                                         if x.startswith("dir"):
12204                                                 skip = False
12205                                                 for ext in dir_extensions:
12206                                                         if x == "dir" + ext or \
12207                                                                 x == "dir" + ext + ".old":
12208                                                                 skip = True
12209                                                                 break
12210                                                 if skip:
12211                                                         continue
12212                                         if processed_count == 0:
12213                                                 for ext in dir_extensions:
12214                                                         try:
12215                                                                 os.rename(dir_file + ext, dir_file + ext + ".old")
12216                                                                 moved_old_dir = True
12217                                                         except EnvironmentError, e:
12218                                                                 if e.errno != errno.ENOENT:
12219                                                                         raise
12220                                                                 del e
12221                                         processed_count += 1
12222                                         myso=commands.getstatusoutput("LANG=C LANGUAGE=C /usr/bin/install-info --dir-file="+inforoot+"/dir "+inforoot+"/"+x)[1]
12223                                         existsstr="already exists, for file `"
12224                                         if myso!="":
12225                                                 if re.search(existsstr,myso):
12226                                                         # Already exists... Don't increment the count for this.
12227                                                         pass
12228                                                 elif myso[:44]=="install-info: warning: no info dir entry in ":
12229                                                         # This info file doesn't contain a DIR-header: install-info produces this
12230                                                         # (harmless) warning (the --quiet switch doesn't seem to work).
12231                                                         # Don't increment the count for this.
12232                                                         pass
12233                                                 else:
12234                                                         badcount=badcount+1
12235                                                         errmsg += myso + "\n"
12236                                         icount=icount+1
12237
12238                                 if moved_old_dir and not os.path.exists(dir_file):
12239                                         # We didn't generate a new dir file, so put the old file
12240                                         # back where it was originally found.
12241                                         for ext in dir_extensions:
12242                                                 try:
12243                                                         os.rename(dir_file + ext + ".old", dir_file + ext)
12244                                                 except EnvironmentError, e:
12245                                                         if e.errno != errno.ENOENT:
12246                                                                 raise
12247                                                         del e
12248
12249                                 # Clean dir.old cruft so that they don't prevent
12250                                 # unmerge of otherwise empty directories.
12251                                 for ext in dir_extensions:
12252                                         try:
12253                                                 os.unlink(dir_file + ext + ".old")
12254                                         except EnvironmentError, e:
12255                                                 if e.errno != errno.ENOENT:
12256                                                         raise
12257                                                 del e
12258
12259                                 #update mtime so we can potentially avoid regenerating.
12260                                 prev_mtimes[inforoot] = long(os.stat(inforoot).st_mtime)
12261
12262                         if badcount:
12263                                 out.eerror("Processed %d info files; %d errors." % \
12264                                         (icount, badcount))
12265                                 writemsg_level(errmsg, level=logging.ERROR, noiselevel=-1)
12266                         else:
12267                                 if icount > 0:
12268                                         out.einfo("Processed %d info files." % (icount,))
12269
12270
12271 def display_news_notification(root_config, myopts):
12272         target_root = root_config.root
12273         trees = root_config.trees
12274         settings = trees["vartree"].settings
12275         portdb = trees["porttree"].dbapi
12276         vardb = trees["vartree"].dbapi
12277         NEWS_PATH = os.path.join("metadata", "news")
12278         UNREAD_PATH = os.path.join(target_root, NEWS_LIB_PATH, "news")
12279         newsReaderDisplay = False
12280         update = "--pretend" not in myopts
12281
12282         for repo in portdb.getRepositories():
12283                 unreadItems = checkUpdatedNewsItems(
12284                         portdb, vardb, NEWS_PATH, UNREAD_PATH, repo, update=update)
12285                 if unreadItems:
12286                         if not newsReaderDisplay:
12287                                 newsReaderDisplay = True
12288                                 print
12289                         print colorize("WARN", " * IMPORTANT:"),
12290                         print "%s news items need reading for repository '%s'." % (unreadItems, repo)
12291                         
12292         
12293         if newsReaderDisplay:
12294                 print colorize("WARN", " *"),
12295                 print "Use " + colorize("GOOD", "eselect news") + " to read news items."
12296                 print
12297
12298 def display_preserved_libs(vardbapi):
12299         MAX_DISPLAY = 3
12300
12301         # Ensure the registry is consistent with existing files.
12302         vardbapi.plib_registry.pruneNonExisting()
12303
12304         if vardbapi.plib_registry.hasEntries():
12305                 print
12306                 print colorize("WARN", "!!!") + " existing preserved libs:"
12307                 plibdata = vardbapi.plib_registry.getPreservedLibs()
12308                 linkmap = vardbapi.linkmap
12309                 consumer_map = {}
12310                 owners = {}
12311                 linkmap_broken = False
12312
12313                 try:
12314                         linkmap.rebuild()
12315                 except portage.exception.CommandNotFound, e:
12316                         writemsg_level("!!! Command Not Found: %s\n" % (e,),
12317                                 level=logging.ERROR, noiselevel=-1)
12318                         del e
12319                         linkmap_broken = True
12320                 else:
12321                         search_for_owners = set()
12322                         for cpv in plibdata:
12323                                 internal_plib_keys = set(linkmap._obj_key(f) \
12324                                         for f in plibdata[cpv])
12325                                 for f in plibdata[cpv]:
12326                                         if f in consumer_map:
12327                                                 continue
12328                                         consumers = []
12329                                         for c in linkmap.findConsumers(f):
12330                                                 # Filter out any consumers that are also preserved libs
12331                                                 # belonging to the same package as the provider.
12332                                                 if linkmap._obj_key(c) not in internal_plib_keys:
12333                                                         consumers.append(c)
12334                                         consumers.sort()
12335                                         consumer_map[f] = consumers
12336                                         search_for_owners.update(consumers[:MAX_DISPLAY+1])
12337
12338                         owners = vardbapi._owners.getFileOwnerMap(search_for_owners)
12339
12340                 for cpv in plibdata:
12341                         print colorize("WARN", ">>>") + " package: %s" % cpv
12342                         samefile_map = {}
12343                         for f in plibdata[cpv]:
12344                                 obj_key = linkmap._obj_key(f)
12345                                 alt_paths = samefile_map.get(obj_key)
12346                                 if alt_paths is None:
12347                                         alt_paths = set()
12348                                         samefile_map[obj_key] = alt_paths
12349                                 alt_paths.add(f)
12350
12351                         for alt_paths in samefile_map.itervalues():
12352                                 alt_paths = sorted(alt_paths)
12353                                 for p in alt_paths:
12354                                         print colorize("WARN", " * ") + " - %s" % (p,)
12355                                 f = alt_paths[0]
12356                                 consumers = consumer_map.get(f, [])
12357                                 for c in consumers[:MAX_DISPLAY]:
12358                                         print colorize("WARN", " * ") + "     used by %s (%s)" % \
12359                                                 (c, ", ".join(x.mycpv for x in owners.get(c, [])))
12360                                 if len(consumers) == MAX_DISPLAY + 1:
12361                                         print colorize("WARN", " * ") + "     used by %s (%s)" % \
12362                                                 (consumers[MAX_DISPLAY], ", ".join(x.mycpv \
12363                                                 for x in owners.get(consumers[MAX_DISPLAY], [])))
12364                                 elif len(consumers) > MAX_DISPLAY:
12365                                         print colorize("WARN", " * ") + "     used by %d other files" % (len(consumers) - MAX_DISPLAY)
12366                 print "Use " + colorize("GOOD", "emerge @preserved-rebuild") + " to rebuild packages using these libraries"
12367
12368
12369 def _flush_elog_mod_echo():
12370         """
12371         Dump the mod_echo output now so that our other
12372         notifications are shown last.
12373         @rtype: bool
12374         @returns: True if messages were shown, False otherwise.
12375         """
12376         messages_shown = False
12377         try:
12378                 from portage.elog import mod_echo
12379         except ImportError:
12380                 pass # happens during downgrade to a version without the module
12381         else:
12382                 messages_shown = bool(mod_echo._items)
12383                 mod_echo.finalize()
12384         return messages_shown
12385
12386 def post_emerge(root_config, myopts, mtimedb, retval):
12387         """
12388         Misc. things to run at the end of a merge session.
12389         
12390         Update Info Files
12391         Update Config Files
12392         Update News Items
12393         Commit mtimeDB
12394         Display preserved libs warnings
12395         Exit Emerge
12396
12397         @param trees: A dictionary mapping each ROOT to it's package databases
12398         @type trees: dict
12399         @param mtimedb: The mtimeDB to store data needed across merge invocations
12400         @type mtimedb: MtimeDB class instance
12401         @param retval: Emerge's return value
12402         @type retval: Int
12403         @rype: None
12404         @returns:
12405         1.  Calls sys.exit(retval)
12406         """
12407
12408         target_root = root_config.root
12409         trees = { target_root : root_config.trees }
12410         vardbapi = trees[target_root]["vartree"].dbapi
12411         settings = vardbapi.settings
12412         info_mtimes = mtimedb["info"]
12413
12414         # Load the most current variables from ${ROOT}/etc/profile.env
12415         settings.unlock()
12416         settings.reload()
12417         settings.regenerate()
12418         settings.lock()
12419
12420         config_protect = settings.get("CONFIG_PROTECT","").split()
12421         infodirs = settings.get("INFOPATH","").split(":") + \
12422                 settings.get("INFODIR","").split(":")
12423
12424         os.chdir("/")
12425
12426         if retval == os.EX_OK:
12427                 exit_msg = " *** exiting successfully."
12428         else:
12429                 exit_msg = " *** exiting unsuccessfully with status '%s'." % retval
12430         emergelog("notitles" not in settings.features, exit_msg)
12431
12432         _flush_elog_mod_echo()
12433
12434         counter_hash = settings.get("PORTAGE_COUNTER_HASH")
12435         if "--pretend" in myopts or (counter_hash is not None and \
12436                 counter_hash == vardbapi._counter_hash()):
12437                 display_news_notification(root_config, myopts)
12438                 # If vdb state has not changed then there's nothing else to do.
12439                 sys.exit(retval)
12440
12441         vdb_path = os.path.join(target_root, portage.VDB_PATH)
12442         portage.util.ensure_dirs(vdb_path)
12443         vdb_lock = None
12444         if os.access(vdb_path, os.W_OK) and not "--pretend" in myopts:
12445                 vdb_lock = portage.locks.lockdir(vdb_path)
12446
12447         if vdb_lock:
12448                 try:
12449                         if "noinfo" not in settings.features:
12450                                 chk_updated_info_files(target_root,
12451                                         infodirs, info_mtimes, retval)
12452                         mtimedb.commit()
12453                 finally:
12454                         if vdb_lock:
12455                                 portage.locks.unlockdir(vdb_lock)
12456
12457         chk_updated_cfg_files(target_root, config_protect)
12458         
12459         display_news_notification(root_config, myopts)
12460         if retval in (None, os.EX_OK) or (not "--pretend" in myopts):
12461                 display_preserved_libs(vardbapi)        
12462
12463         sys.exit(retval)
12464
12465
12466 def chk_updated_cfg_files(target_root, config_protect):
12467         if config_protect:
12468                 #number of directories with some protect files in them
12469                 procount=0
12470                 for x in config_protect:
12471                         x = os.path.join(target_root, x.lstrip(os.path.sep))
12472                         if not os.access(x, os.W_OK):
12473                                 # Avoid Permission denied errors generated
12474                                 # later by `find`.
12475                                 continue
12476                         try:
12477                                 mymode = os.lstat(x).st_mode
12478                         except OSError:
12479                                 continue
12480                         if stat.S_ISLNK(mymode):
12481                                 # We want to treat it like a directory if it
12482                                 # is a symlink to an existing directory.
12483                                 try:
12484                                         real_mode = os.stat(x).st_mode
12485                                         if stat.S_ISDIR(real_mode):
12486                                                 mymode = real_mode
12487                                 except OSError:
12488                                         pass
12489                         if stat.S_ISDIR(mymode):
12490                                 mycommand = "find '%s' -name '.*' -type d -prune -o -name '._cfg????_*'" % x
12491                         else:
12492                                 mycommand = "find '%s' -maxdepth 1 -name '._cfg????_%s'" % \
12493                                         os.path.split(x.rstrip(os.path.sep))
12494                         mycommand += " ! -name '.*~' ! -iname '.*.bak' -print0"
12495                         a = commands.getstatusoutput(mycommand)
12496                         if a[0] != 0:
12497                                 sys.stderr.write(" %s error scanning '%s': " % (bad("*"), x))
12498                                 sys.stderr.flush()
12499                                 # Show the error message alone, sending stdout to /dev/null.
12500                                 os.system(mycommand + " 1>/dev/null")
12501                         else:
12502                                 files = a[1].split('\0')
12503                                 # split always produces an empty string as the last element
12504                                 if files and not files[-1]:
12505                                         del files[-1]
12506                                 if files:
12507                                         procount += 1
12508                                         print "\n"+colorize("WARN", " * IMPORTANT:"),
12509                                         if stat.S_ISDIR(mymode):
12510                                                  print "%d config files in '%s' need updating." % \
12511                                                         (len(files), x)
12512                                         else:
12513                                                  print "config file '%s' needs updating." % x
12514
12515                 if procount:
12516                         print " "+yellow("*")+" See the "+colorize("INFORM","CONFIGURATION FILES")+ \
12517                                 " section of the " + bold("emerge")
12518                         print " "+yellow("*")+" man page to learn how to update config files."
12519
12520 def checkUpdatedNewsItems(portdb, vardb, NEWS_PATH, UNREAD_PATH, repo_id,
12521         update=False):
12522         """
12523         Examines news items in repodir + '/' + NEWS_PATH and attempts to find unread items
12524         Returns the number of unread (yet relevent) items.
12525         
12526         @param portdb: a portage tree database
12527         @type portdb: pordbapi
12528         @param vardb: an installed package database
12529         @type vardb: vardbapi
12530         @param NEWS_PATH:
12531         @type NEWS_PATH:
12532         @param UNREAD_PATH:
12533         @type UNREAD_PATH:
12534         @param repo_id:
12535         @type repo_id:
12536         @rtype: Integer
12537         @returns:
12538         1.  The number of unread but relevant news items.
12539         
12540         """
12541         from portage.news import NewsManager
12542         manager = NewsManager(portdb, vardb, NEWS_PATH, UNREAD_PATH)
12543         return manager.getUnreadItems( repo_id, update=update )
12544
12545 def insert_category_into_atom(atom, category):
12546         alphanum = re.search(r'\w', atom)
12547         if alphanum:
12548                 ret = atom[:alphanum.start()] + "%s/" % category + \
12549                         atom[alphanum.start():]
12550         else:
12551                 ret = None
12552         return ret
12553
12554 def is_valid_package_atom(x):
12555         if "/" not in x:
12556                 alphanum = re.search(r'\w', x)
12557                 if alphanum:
12558                         x = x[:alphanum.start()] + "cat/" + x[alphanum.start():]
12559         return portage.isvalidatom(x)
12560
12561 def show_blocker_docs_link():
12562         print
12563         print "For more information about " + bad("Blocked Packages") + ", please refer to the following"
12564         print "section of the Gentoo Linux x86 Handbook (architecture is irrelevant):"
12565         print
12566         print "http://www.gentoo.org/doc/en/handbook/handbook-x86.xml?full=1#blocked"
12567         print
12568
12569 def show_mask_docs():
12570         print "For more information, see the MASKED PACKAGES section in the emerge"
12571         print "man page or refer to the Gentoo Handbook."
12572
12573 def action_sync(settings, trees, mtimedb, myopts, myaction):
12574         xterm_titles = "notitles" not in settings.features
12575         emergelog(xterm_titles, " === sync")
12576         myportdir = settings.get("PORTDIR", None)
12577         out = portage.output.EOutput()
12578         if not myportdir:
12579                 sys.stderr.write("!!! PORTDIR is undefined.  Is /etc/make.globals missing?\n")
12580                 sys.exit(1)
12581         if myportdir[-1]=="/":
12582                 myportdir=myportdir[:-1]
12583         try:
12584                 st = os.stat(myportdir)
12585         except OSError:
12586                 st = None
12587         if st is None:
12588                 print ">>>",myportdir,"not found, creating it."
12589                 os.makedirs(myportdir,0755)
12590                 st = os.stat(myportdir)
12591
12592         spawn_kwargs = {}
12593         spawn_kwargs["env"] = settings.environ()
12594         if 'usersync' in settings.features and \
12595                 portage.data.secpass >= 2 and \
12596                 (st.st_uid != os.getuid() and st.st_mode & 0700 or \
12597                 st.st_gid != os.getgid() and st.st_mode & 0070):
12598                 try:
12599                         homedir = pwd.getpwuid(st.st_uid).pw_dir
12600                 except KeyError:
12601                         pass
12602                 else:
12603                         # Drop privileges when syncing, in order to match
12604                         # existing uid/gid settings.
12605                         spawn_kwargs["uid"]    = st.st_uid
12606                         spawn_kwargs["gid"]    = st.st_gid
12607                         spawn_kwargs["groups"] = [st.st_gid]
12608                         spawn_kwargs["env"]["HOME"] = homedir
12609                         umask = 0002
12610                         if not st.st_mode & 0020:
12611                                 umask = umask | 0020
12612                         spawn_kwargs["umask"] = umask
12613
12614         syncuri = settings.get("SYNC", "").strip()
12615         if not syncuri:
12616                 writemsg_level("!!! SYNC is undefined. Is /etc/make.globals missing?\n",
12617                         noiselevel=-1, level=logging.ERROR)
12618                 return 1
12619
12620         vcs_dirs = frozenset([".git", ".svn", "CVS", ".hg"])
12621         vcs_dirs = vcs_dirs.intersection(os.listdir(myportdir))
12622
12623         os.umask(0022)
12624         dosyncuri = syncuri
12625         updatecache_flg = False
12626         if myaction == "metadata":
12627                 print "skipping sync"
12628                 updatecache_flg = True
12629         elif ".git" in vcs_dirs:
12630                 # Update existing git repository, and ignore the syncuri. We are
12631                 # going to trust the user and assume that the user is in the branch
12632                 # that he/she wants updated. We'll let the user manage branches with
12633                 # git directly.
12634                 if portage.process.find_binary("git") is None:
12635                         msg = ["Command not found: git",
12636                         "Type \"emerge dev-util/git\" to enable git support."]
12637                         for l in msg:
12638                                 writemsg_level("!!! %s\n" % l,
12639                                         level=logging.ERROR, noiselevel=-1)
12640                         return 1
12641                 msg = ">>> Starting git pull in %s..." % myportdir
12642                 emergelog(xterm_titles, msg )
12643                 writemsg_level(msg + "\n")
12644                 exitcode = portage.process.spawn_bash("cd %s ; git pull" % \
12645                         (portage._shell_quote(myportdir),), **spawn_kwargs)
12646                 if exitcode != os.EX_OK:
12647                         msg = "!!! git pull error in %s." % myportdir
12648                         emergelog(xterm_titles, msg)
12649                         writemsg_level(msg + "\n", level=logging.ERROR, noiselevel=-1)
12650                         return exitcode
12651                 msg = ">>> Git pull in %s successful" % myportdir
12652                 emergelog(xterm_titles, msg)
12653                 writemsg_level(msg + "\n")
12654                 exitcode = git_sync_timestamps(settings, myportdir)
12655                 if exitcode == os.EX_OK:
12656                         updatecache_flg = True
12657         elif syncuri[:8]=="rsync://":
12658                 for vcs_dir in vcs_dirs:
12659                         writemsg_level(("!!! %s appears to be under revision " + \
12660                                 "control (contains %s).\n!!! Aborting rsync sync.\n") % \
12661                                 (myportdir, vcs_dir), level=logging.ERROR, noiselevel=-1)
12662                         return 1
12663                 if not os.path.exists("/usr/bin/rsync"):
12664                         print "!!! /usr/bin/rsync does not exist, so rsync support is disabled."
12665                         print "!!! Type \"emerge net-misc/rsync\" to enable rsync support."
12666                         sys.exit(1)
12667                 mytimeout=180
12668
12669                 rsync_opts = []
12670                 if settings["PORTAGE_RSYNC_OPTS"] == "":
12671                         portage.writemsg("PORTAGE_RSYNC_OPTS empty or unset, using hardcoded defaults\n")
12672                         rsync_opts.extend([
12673                                 "--recursive",    # Recurse directories
12674                                 "--links",        # Consider symlinks
12675                                 "--safe-links",   # Ignore links outside of tree
12676                                 "--perms",        # Preserve permissions
12677                                 "--times",        # Preserive mod times
12678                                 "--compress",     # Compress the data transmitted
12679                                 "--force",        # Force deletion on non-empty dirs
12680                                 "--whole-file",   # Don't do block transfers, only entire files
12681                                 "--delete",       # Delete files that aren't in the master tree
12682                                 "--stats",        # Show final statistics about what was transfered
12683                                 "--timeout="+str(mytimeout), # IO timeout if not done in X seconds
12684                                 "--exclude=/distfiles",   # Exclude distfiles from consideration
12685                                 "--exclude=/local",       # Exclude local     from consideration
12686                                 "--exclude=/packages",    # Exclude packages  from consideration
12687                         ])
12688
12689                 else:
12690                         # The below validation is not needed when using the above hardcoded
12691                         # defaults.
12692
12693                         portage.writemsg("Using PORTAGE_RSYNC_OPTS instead of hardcoded defaults\n", 1)
12694                         rsync_opts.extend(
12695                                 shlex.split(settings.get("PORTAGE_RSYNC_OPTS","")))
12696                         for opt in ("--recursive", "--times"):
12697                                 if opt not in rsync_opts:
12698                                         portage.writemsg(yellow("WARNING:") + " adding required option " + \
12699                                         "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
12700                                         rsync_opts.append(opt)
12701         
12702                         for exclude in ("distfiles", "local", "packages"):
12703                                 opt = "--exclude=/%s" % exclude
12704                                 if opt not in rsync_opts:
12705                                         portage.writemsg(yellow("WARNING:") + \
12706                                         " adding required option %s not included in "  % opt + \
12707                                         "PORTAGE_RSYNC_OPTS (can be overridden with --exclude='!')\n")
12708                                         rsync_opts.append(opt)
12709         
12710                         if syncuri.rstrip("/").endswith(".gentoo.org/gentoo-portage"):
12711                                 def rsync_opt_startswith(opt_prefix):
12712                                         for x in rsync_opts:
12713                                                 if x.startswith(opt_prefix):
12714                                                         return True
12715                                         return False
12716
12717                                 if not rsync_opt_startswith("--timeout="):
12718                                         rsync_opts.append("--timeout=%d" % mytimeout)
12719
12720                                 for opt in ("--compress", "--whole-file"):
12721                                         if opt not in rsync_opts:
12722                                                 portage.writemsg(yellow("WARNING:") + " adding required option " + \
12723                                                 "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
12724                                                 rsync_opts.append(opt)
12725
12726                 if "--quiet" in myopts:
12727                         rsync_opts.append("--quiet")    # Shut up a lot
12728                 else:
12729                         rsync_opts.append("--verbose")  # Print filelist
12730
12731                 if "--verbose" in myopts:
12732                         rsync_opts.append("--progress")  # Progress meter for each file
12733
12734                 if "--debug" in myopts:
12735                         rsync_opts.append("--checksum") # Force checksum on all files
12736
12737                 # Real local timestamp file.
12738                 servertimestampfile = os.path.join(
12739                         myportdir, "metadata", "timestamp.chk")
12740
12741                 content = portage.util.grabfile(servertimestampfile)
12742                 mytimestamp = 0
12743                 if content:
12744                         try:
12745                                 mytimestamp = time.mktime(time.strptime(content[0],
12746                                         "%a, %d %b %Y %H:%M:%S +0000"))
12747                         except (OverflowError, ValueError):
12748                                 pass
12749                 del content
12750
12751                 try:
12752                         rsync_initial_timeout = \
12753                                 int(settings.get("PORTAGE_RSYNC_INITIAL_TIMEOUT", "15"))
12754                 except ValueError:
12755                         rsync_initial_timeout = 15
12756
12757                 try:
12758                         maxretries=int(settings["PORTAGE_RSYNC_RETRIES"])
12759                 except SystemExit, e:
12760                         raise # Needed else can't exit
12761                 except:
12762                         maxretries=3 #default number of retries
12763
12764                 retries=0
12765                 user_name, hostname, port = re.split(
12766                         "rsync://([^:/]+@)?([^:/]*)(:[0-9]+)?", syncuri, maxsplit=3)[1:4]
12767                 if port is None:
12768                         port=""
12769                 if user_name is None:
12770                         user_name=""
12771                 updatecache_flg=True
12772                 all_rsync_opts = set(rsync_opts)
12773                 extra_rsync_opts = shlex.split(
12774                         settings.get("PORTAGE_RSYNC_EXTRA_OPTS",""))
12775                 all_rsync_opts.update(extra_rsync_opts)
12776                 family = socket.AF_INET
12777                 if "-4" in all_rsync_opts or "--ipv4" in all_rsync_opts:
12778                         family = socket.AF_INET
12779                 elif socket.has_ipv6 and \
12780                         ("-6" in all_rsync_opts or "--ipv6" in all_rsync_opts):
12781                         family = socket.AF_INET6
12782                 ips=[]
12783                 SERVER_OUT_OF_DATE = -1
12784                 EXCEEDED_MAX_RETRIES = -2
12785                 while (1):
12786                         if ips:
12787                                 del ips[0]
12788                         if ips==[]:
12789                                 try:
12790                                         for addrinfo in socket.getaddrinfo(
12791                                                 hostname, None, family, socket.SOCK_STREAM):
12792                                                 if socket.has_ipv6 and addrinfo[0] == socket.AF_INET6:
12793                                                         # IPv6 addresses need to be enclosed in square brackets
12794                                                         ips.append("[%s]" % addrinfo[4][0])
12795                                                 else:
12796                                                         ips.append(addrinfo[4][0])
12797                                         from random import shuffle
12798                                         shuffle(ips)
12799                                 except SystemExit, e:
12800                                         raise # Needed else can't exit
12801                                 except Exception, e:
12802                                         print "Notice:",str(e)
12803                                         dosyncuri=syncuri
12804
12805                         if ips:
12806                                 try:
12807                                         dosyncuri = syncuri.replace(
12808                                                 "//" + user_name + hostname + port + "/",
12809                                                 "//" + user_name + ips[0] + port + "/", 1)
12810                                 except SystemExit, e:
12811                                         raise # Needed else can't exit
12812                                 except Exception, e:
12813                                         print "Notice:",str(e)
12814                                         dosyncuri=syncuri
12815
12816                         if (retries==0):
12817                                 if "--ask" in myopts:
12818                                         if userquery("Do you want to sync your Portage tree with the mirror at\n" + blue(dosyncuri) + bold("?"))=="No":
12819                                                 print
12820                                                 print "Quitting."
12821                                                 print
12822                                                 sys.exit(0)
12823                                 emergelog(xterm_titles, ">>> Starting rsync with " + dosyncuri)
12824                                 if "--quiet" not in myopts:
12825                                         print ">>> Starting rsync with "+dosyncuri+"..."
12826                         else:
12827                                 emergelog(xterm_titles,
12828                                         ">>> Starting retry %d of %d with %s" % \
12829                                                 (retries,maxretries,dosyncuri))
12830                                 print "\n\n>>> Starting retry %d of %d with %s" % (retries,maxretries,dosyncuri)
12831
12832                         if mytimestamp != 0 and "--quiet" not in myopts:
12833                                 print ">>> Checking server timestamp ..."
12834
12835                         rsynccommand = ["/usr/bin/rsync"] + rsync_opts + extra_rsync_opts
12836
12837                         if "--debug" in myopts:
12838                                 print rsynccommand
12839
12840                         exitcode = os.EX_OK
12841                         servertimestamp = 0
12842                         # Even if there's no timestamp available locally, fetch the
12843                         # timestamp anyway as an initial probe to verify that the server is
12844                         # responsive.  This protects us from hanging indefinitely on a
12845                         # connection attempt to an unresponsive server which rsync's
12846                         # --timeout option does not prevent.
12847                         if True:
12848                                 # Temporary file for remote server timestamp comparison.
12849                                 from tempfile import mkstemp
12850                                 fd, tmpservertimestampfile = mkstemp()
12851                                 os.close(fd)
12852                                 mycommand = rsynccommand[:]
12853                                 mycommand.append(dosyncuri.rstrip("/") + \
12854                                         "/metadata/timestamp.chk")
12855                                 mycommand.append(tmpservertimestampfile)
12856                                 content = None
12857                                 mypids = []
12858                                 try:
12859                                         def timeout_handler(signum, frame):
12860                                                 raise portage.exception.PortageException("timed out")
12861                                         signal.signal(signal.SIGALRM, timeout_handler)
12862                                         # Timeout here in case the server is unresponsive.  The
12863                                         # --timeout rsync option doesn't apply to the initial
12864                                         # connection attempt.
12865                                         if rsync_initial_timeout:
12866                                                 signal.alarm(rsync_initial_timeout)
12867                                         try:
12868                                                 mypids.extend(portage.process.spawn(
12869                                                         mycommand, env=settings.environ(), returnpid=True))
12870                                                 exitcode = os.waitpid(mypids[0], 0)[1]
12871                                                 content = portage.grabfile(tmpservertimestampfile)
12872                                         finally:
12873                                                 if rsync_initial_timeout:
12874                                                         signal.alarm(0)
12875                                                 try:
12876                                                         os.unlink(tmpservertimestampfile)
12877                                                 except OSError:
12878                                                         pass
12879                                 except portage.exception.PortageException, e:
12880                                         # timed out
12881                                         print e
12882                                         del e
12883                                         if mypids and os.waitpid(mypids[0], os.WNOHANG) == (0,0):
12884                                                 os.kill(mypids[0], signal.SIGTERM)
12885                                                 os.waitpid(mypids[0], 0)
12886                                         # This is the same code rsync uses for timeout.
12887                                         exitcode = 30
12888                                 else:
12889                                         if exitcode != os.EX_OK:
12890                                                 if exitcode & 0xff:
12891                                                         exitcode = (exitcode & 0xff) << 8
12892                                                 else:
12893                                                         exitcode = exitcode >> 8
12894                                 if mypids:
12895                                         portage.process.spawned_pids.remove(mypids[0])
12896                                 if content:
12897                                         try:
12898                                                 servertimestamp = time.mktime(time.strptime(
12899                                                         content[0], "%a, %d %b %Y %H:%M:%S +0000"))
12900                                         except (OverflowError, ValueError):
12901                                                 pass
12902                                 del mycommand, mypids, content
12903                         if exitcode == os.EX_OK:
12904                                 if (servertimestamp != 0) and (servertimestamp == mytimestamp):
12905                                         emergelog(xterm_titles,
12906                                                 ">>> Cancelling sync -- Already current.")
12907                                         print
12908                                         print ">>>"
12909                                         print ">>> Timestamps on the server and in the local repository are the same."
12910                                         print ">>> Cancelling all further sync action. You are already up to date."
12911                                         print ">>>"
12912                                         print ">>> In order to force sync, remove '%s'." % servertimestampfile
12913                                         print ">>>"
12914                                         print
12915                                         sys.exit(0)
12916                                 elif (servertimestamp != 0) and (servertimestamp < mytimestamp):
12917                                         emergelog(xterm_titles,
12918                                                 ">>> Server out of date: %s" % dosyncuri)
12919                                         print
12920                                         print ">>>"
12921                                         print ">>> SERVER OUT OF DATE: %s" % dosyncuri
12922                                         print ">>>"
12923                                         print ">>> In order to force sync, remove '%s'." % servertimestampfile
12924                                         print ">>>"
12925                                         print
12926                                         exitcode = SERVER_OUT_OF_DATE
12927                                 elif (servertimestamp == 0) or (servertimestamp > mytimestamp):
12928                                         # actual sync
12929                                         mycommand = rsynccommand + [dosyncuri+"/", myportdir]
12930                                         exitcode = portage.process.spawn(mycommand, **spawn_kwargs)
12931                                         if exitcode in [0,1,3,4,11,14,20,21]:
12932                                                 break
12933                         elif exitcode in [1,3,4,11,14,20,21]:
12934                                 break
12935                         else:
12936                                 # Code 2 indicates protocol incompatibility, which is expected
12937                                 # for servers with protocol < 29 that don't support
12938                                 # --prune-empty-directories.  Retry for a server that supports
12939                                 # at least rsync protocol version 29 (>=rsync-2.6.4).
12940                                 pass
12941
12942                         retries=retries+1
12943
12944                         if retries<=maxretries:
12945                                 print ">>> Retrying..."
12946                                 time.sleep(11)
12947                         else:
12948                                 # over retries
12949                                 # exit loop
12950                                 updatecache_flg=False
12951                                 exitcode = EXCEEDED_MAX_RETRIES
12952                                 break
12953
12954                 if (exitcode==0):
12955                         emergelog(xterm_titles, "=== Sync completed with %s" % dosyncuri)
12956                 elif exitcode == SERVER_OUT_OF_DATE:
12957                         sys.exit(1)
12958                 elif exitcode == EXCEEDED_MAX_RETRIES:
12959                         sys.stderr.write(
12960                                 ">>> Exceeded PORTAGE_RSYNC_RETRIES: %s\n" % maxretries)
12961                         sys.exit(1)
12962                 elif (exitcode>0):
12963                         msg = []
12964                         if exitcode==1:
12965                                 msg.append("Rsync has reported that there is a syntax error. Please ensure")
12966                                 msg.append("that your SYNC statement is proper.")
12967                                 msg.append("SYNC=" + settings["SYNC"])
12968                         elif exitcode==11:
12969                                 msg.append("Rsync has reported that there is a File IO error. Normally")
12970                                 msg.append("this means your disk is full, but can be caused by corruption")
12971                                 msg.append("on the filesystem that contains PORTDIR. Please investigate")
12972                                 msg.append("and try again after the problem has been fixed.")
12973                                 msg.append("PORTDIR=" + settings["PORTDIR"])
12974                         elif exitcode==20:
12975                                 msg.append("Rsync was killed before it finished.")
12976                         else:
12977                                 msg.append("Rsync has not successfully finished. It is recommended that you keep")
12978                                 msg.append("trying or that you use the 'emerge-webrsync' option if you are unable")
12979                                 msg.append("to use rsync due to firewall or other restrictions. This should be a")
12980                                 msg.append("temporary problem unless complications exist with your network")
12981                                 msg.append("(and possibly your system's filesystem) configuration.")
12982                         for line in msg:
12983                                 out.eerror(line)
12984                         sys.exit(exitcode)
12985         elif syncuri[:6]=="cvs://":
12986                 if not os.path.exists("/usr/bin/cvs"):
12987                         print "!!! /usr/bin/cvs does not exist, so CVS support is disabled."
12988                         print "!!! Type \"emerge dev-util/cvs\" to enable CVS support."
12989                         sys.exit(1)
12990                 cvsroot=syncuri[6:]
12991                 cvsdir=os.path.dirname(myportdir)
12992                 if not os.path.exists(myportdir+"/CVS"):
12993                         #initial checkout
12994                         print ">>> Starting initial cvs checkout with "+syncuri+"..."
12995                         if os.path.exists(cvsdir+"/gentoo-x86"):
12996                                 print "!!! existing",cvsdir+"/gentoo-x86 directory; exiting."
12997                                 sys.exit(1)
12998                         try:
12999                                 os.rmdir(myportdir)
13000                         except OSError, e:
13001                                 if e.errno != errno.ENOENT:
13002                                         sys.stderr.write(
13003                                                 "!!! existing '%s' directory; exiting.\n" % myportdir)
13004                                         sys.exit(1)
13005                                 del e
13006                         if portage.spawn("cd "+cvsdir+"; cvs -z0 -d "+cvsroot+" co -P gentoo-x86",settings,free=1):
13007                                 print "!!! cvs checkout error; exiting."
13008                                 sys.exit(1)
13009                         os.rename(os.path.join(cvsdir, "gentoo-x86"), myportdir)
13010                 else:
13011                         #cvs update
13012                         print ">>> Starting cvs update with "+syncuri+"..."
13013                         retval = portage.process.spawn_bash(
13014                                 "cd %s; cvs -z0 -q update -dP" % \
13015                                 (portage._shell_quote(myportdir),), **spawn_kwargs)
13016                         if retval != os.EX_OK:
13017                                 sys.exit(retval)
13018                 dosyncuri = syncuri
13019         else:
13020                 writemsg_level("!!! Unrecognized protocol: SYNC='%s'\n" % (syncuri,),
13021                         noiselevel=-1, level=logging.ERROR)
13022                 return 1
13023
13024         if updatecache_flg and  \
13025                 myaction != "metadata" and \
13026                 "metadata-transfer" not in settings.features:
13027                 updatecache_flg = False
13028
13029         # Reload the whole config from scratch.
13030         settings, trees, mtimedb = load_emerge_config(trees=trees)
13031         root_config = trees[settings["ROOT"]]["root_config"]
13032         portdb = trees[settings["ROOT"]]["porttree"].dbapi
13033
13034         if os.path.exists(myportdir+"/metadata/cache") and updatecache_flg:
13035                 action_metadata(settings, portdb, myopts)
13036
13037         if portage._global_updates(trees, mtimedb["updates"]):
13038                 mtimedb.commit()
13039                 # Reload the whole config from scratch.
13040                 settings, trees, mtimedb = load_emerge_config(trees=trees)
13041                 portdb = trees[settings["ROOT"]]["porttree"].dbapi
13042                 root_config = trees[settings["ROOT"]]["root_config"]
13043
13044         mybestpv = portdb.xmatch("bestmatch-visible",
13045                 portage.const.PORTAGE_PACKAGE_ATOM)
13046         mypvs = portage.best(
13047                 trees[settings["ROOT"]]["vartree"].dbapi.match(
13048                 portage.const.PORTAGE_PACKAGE_ATOM))
13049
13050         chk_updated_cfg_files("/", settings.get("CONFIG_PROTECT","").split())
13051
13052         if myaction != "metadata":
13053                 if os.access(portage.USER_CONFIG_PATH + "/bin/post_sync", os.X_OK):
13054                         retval = portage.process.spawn(
13055                                 [os.path.join(portage.USER_CONFIG_PATH, "bin", "post_sync"),
13056                                 dosyncuri], env=settings.environ())
13057                         if retval != os.EX_OK:
13058                                 print red(" * ")+bold("spawn failed of "+ portage.USER_CONFIG_PATH + "/bin/post_sync")
13059
13060         if(mybestpv != mypvs) and not "--quiet" in myopts:
13061                 print
13062                 print red(" * ")+bold("An update to portage is available.")+" It is _highly_ recommended"
13063                 print red(" * ")+"that you update portage now, before any other packages are updated."
13064                 print
13065                 print red(" * ")+"To update portage, run 'emerge portage' now."
13066                 print
13067         
13068         display_news_notification(root_config, myopts)
13069         return os.EX_OK
13070
13071 def git_sync_timestamps(settings, portdir):
13072         """
13073         Since git doesn't preserve timestamps, synchronize timestamps between
13074         entries and ebuilds/eclasses. Assume the cache has the correct timestamp
13075         for a given file as long as the file in the working tree is not modified
13076         (relative to HEAD).
13077         """
13078         cache_dir = os.path.join(portdir, "metadata", "cache")
13079         if not os.path.isdir(cache_dir):
13080                 return os.EX_OK
13081         writemsg_level(">>> Synchronizing timestamps...\n")
13082
13083         from portage.cache.cache_errors import CacheError
13084         try:
13085                 cache_db = settings.load_best_module("portdbapi.metadbmodule")(
13086                         portdir, "metadata/cache", portage.auxdbkeys[:], readonly=True)
13087         except CacheError, e:
13088                 writemsg_level("!!! Unable to instantiate cache: %s\n" % (e,),
13089                         level=logging.ERROR, noiselevel=-1)
13090                 return 1
13091
13092         ec_dir = os.path.join(portdir, "eclass")
13093         try:
13094                 ec_names = set(f[:-7] for f in os.listdir(ec_dir) \
13095                         if f.endswith(".eclass"))
13096         except OSError, e:
13097                 writemsg_level("!!! Unable to list eclasses: %s\n" % (e,),
13098                         level=logging.ERROR, noiselevel=-1)
13099                 return 1
13100
13101         args = [portage.const.BASH_BINARY, "-c",
13102                 "cd %s && git diff-index --name-only --diff-filter=M HEAD" % \
13103                 portage._shell_quote(portdir)]
13104         import subprocess
13105         proc = subprocess.Popen(args, stdout=subprocess.PIPE)
13106         modified_files = set(l.rstrip("\n") for l in proc.stdout)
13107         rval = proc.wait()
13108         if rval != os.EX_OK:
13109                 return rval
13110
13111         modified_eclasses = set(ec for ec in ec_names \
13112                 if os.path.join("eclass", ec + ".eclass") in modified_files)
13113
13114         updated_ec_mtimes = {}
13115
13116         for cpv in cache_db:
13117                 cpv_split = portage.catpkgsplit(cpv)
13118                 if cpv_split is None:
13119                         writemsg_level("!!! Invalid cache entry: %s\n" % (cpv,),
13120                                 level=logging.ERROR, noiselevel=-1)
13121                         continue
13122
13123                 cat, pn, ver, rev = cpv_split
13124                 cat, pf = portage.catsplit(cpv)
13125                 relative_eb_path = os.path.join(cat, pn, pf + ".ebuild")
13126                 if relative_eb_path in modified_files:
13127                         continue
13128
13129                 try:
13130                         cache_entry = cache_db[cpv]
13131                         eb_mtime = cache_entry.get("_mtime_")
13132                         ec_mtimes = cache_entry.get("_eclasses_")
13133                 except KeyError:
13134                         writemsg_level("!!! Missing cache entry: %s\n" % (cpv,),
13135                                 level=logging.ERROR, noiselevel=-1)
13136                         continue
13137                 except CacheError, e:
13138                         writemsg_level("!!! Unable to access cache entry: %s %s\n" % \
13139                                 (cpv, e), level=logging.ERROR, noiselevel=-1)
13140                         continue
13141
13142                 if eb_mtime is None:
13143                         writemsg_level("!!! Missing ebuild mtime: %s\n" % (cpv,),
13144                                 level=logging.ERROR, noiselevel=-1)
13145                         continue
13146
13147                 try:
13148                         eb_mtime = long(eb_mtime)
13149                 except ValueError:
13150                         writemsg_level("!!! Invalid ebuild mtime: %s %s\n" % \
13151                                 (cpv, eb_mtime), level=logging.ERROR, noiselevel=-1)
13152                         continue
13153
13154                 if ec_mtimes is None:
13155                         writemsg_level("!!! Missing eclass mtimes: %s\n" % (cpv,),
13156                                 level=logging.ERROR, noiselevel=-1)
13157                         continue
13158
13159                 if modified_eclasses.intersection(ec_mtimes):
13160                         continue
13161
13162                 missing_eclasses = set(ec_mtimes).difference(ec_names)
13163                 if missing_eclasses:
13164                         writemsg_level("!!! Non-existent eclass(es): %s %s\n" % \
13165                                 (cpv, sorted(missing_eclasses)), level=logging.ERROR,
13166                                 noiselevel=-1)
13167                         continue
13168
13169                 eb_path = os.path.join(portdir, relative_eb_path)
13170                 try:
13171                         current_eb_mtime = os.stat(eb_path)
13172                 except OSError:
13173                         writemsg_level("!!! Missing ebuild: %s\n" % \
13174                                 (cpv,), level=logging.ERROR, noiselevel=-1)
13175                         continue
13176
13177                 inconsistent = False
13178                 for ec, (ec_path, ec_mtime) in ec_mtimes.iteritems():
13179                         updated_mtime = updated_ec_mtimes.get(ec)
13180                         if updated_mtime is not None and updated_mtime != ec_mtime:
13181                                 writemsg_level("!!! Inconsistent eclass mtime: %s %s\n" % \
13182                                         (cpv, ec), level=logging.ERROR, noiselevel=-1)
13183                                 inconsistent = True
13184                                 break
13185
13186                 if inconsistent:
13187                         continue
13188
13189                 if current_eb_mtime != eb_mtime:
13190                         os.utime(eb_path, (eb_mtime, eb_mtime))
13191
13192                 for ec, (ec_path, ec_mtime) in ec_mtimes.iteritems():
13193                         if ec in updated_ec_mtimes:
13194                                 continue
13195                         ec_path = os.path.join(ec_dir, ec + ".eclass")
13196                         current_mtime = long(os.stat(ec_path).st_mtime)
13197                         if current_mtime != ec_mtime:
13198                                 os.utime(ec_path, (ec_mtime, ec_mtime))
13199                         updated_ec_mtimes[ec] = ec_mtime
13200
13201         return os.EX_OK
13202
13203 def action_metadata(settings, portdb, myopts):
13204         portage.writemsg_stdout("\n>>> Updating Portage cache:      ")
13205         old_umask = os.umask(0002)
13206         cachedir = os.path.normpath(settings.depcachedir)
13207         if cachedir in ["/",    "/bin", "/dev",  "/etc",  "/home",
13208                                         "/lib", "/opt", "/proc", "/root", "/sbin",
13209                                         "/sys", "/tmp", "/usr",  "/var"]:
13210                 print >> sys.stderr, "!!! PORTAGE_DEPCACHEDIR IS SET TO A PRIMARY " + \
13211                         "ROOT DIRECTORY ON YOUR SYSTEM."
13212                 print >> sys.stderr, \
13213                         "!!! This is ALMOST CERTAINLY NOT what you want: '%s'" % cachedir
13214                 sys.exit(73)
13215         if not os.path.exists(cachedir):
13216                 os.mkdir(cachedir)
13217
13218         ec = portage.eclass_cache.cache(portdb.porttree_root)
13219         myportdir = os.path.realpath(settings["PORTDIR"])
13220         cm = settings.load_best_module("portdbapi.metadbmodule")(
13221                 myportdir, "metadata/cache", portage.auxdbkeys[:], readonly=True)
13222
13223         from portage.cache import util
13224
13225         class percentage_noise_maker(util.quiet_mirroring):
13226                 def __init__(self, dbapi):
13227                         self.dbapi = dbapi
13228                         self.cp_all = dbapi.cp_all()
13229                         l = len(self.cp_all)
13230                         self.call_update_min = 100000000
13231                         self.min_cp_all = l/100.0
13232                         self.count = 1
13233                         self.pstr = ''
13234
13235                 def __iter__(self):
13236                         for x in self.cp_all:
13237                                 self.count += 1
13238                                 if self.count > self.min_cp_all:
13239                                         self.call_update_min = 0
13240                                         self.count = 0
13241                                 for y in self.dbapi.cp_list(x):
13242                                         yield y
13243                         self.call_update_mine = 0
13244
13245                 def update(self, *arg):
13246                         try:
13247                                 self.pstr = int(self.pstr) + 1
13248                         except ValueError:
13249                                 self.pstr = 1
13250                         sys.stdout.write("%s%i%%" % \
13251                                 ("\b" * (len(str(self.pstr))+1), self.pstr))
13252                         sys.stdout.flush()
13253                         self.call_update_min = 10000000
13254
13255                 def finish(self, *arg):
13256                         sys.stdout.write("\b\b\b\b100%\n")
13257                         sys.stdout.flush()
13258
13259         if "--quiet" in myopts:
13260                 def quicky_cpv_generator(cp_all_list):
13261                         for x in cp_all_list:
13262                                 for y in portdb.cp_list(x):
13263                                         yield y
13264                 source = quicky_cpv_generator(portdb.cp_all())
13265                 noise_maker = portage.cache.util.quiet_mirroring()
13266         else:
13267                 noise_maker = source = percentage_noise_maker(portdb)
13268         portage.cache.util.mirror_cache(source, cm, portdb.auxdb[myportdir],
13269                 eclass_cache=ec, verbose_instance=noise_maker)
13270
13271         sys.stdout.flush()
13272         os.umask(old_umask)
13273
13274 def action_regen(settings, portdb, max_jobs, max_load):
13275         xterm_titles = "notitles" not in settings.features
13276         emergelog(xterm_titles, " === regen")
13277         #regenerate cache entries
13278         portage.writemsg_stdout("Regenerating cache entries...\n")
13279         try:
13280                 os.close(sys.stdin.fileno())
13281         except SystemExit, e:
13282                 raise # Needed else can't exit
13283         except:
13284                 pass
13285         sys.stdout.flush()
13286
13287         regen = MetadataRegen(portdb, max_jobs=max_jobs, max_load=max_load)
13288         regen.run()
13289
13290         portage.writemsg_stdout("done!\n")
13291         return regen.returncode
13292
13293 def action_config(settings, trees, myopts, myfiles):
13294         if len(myfiles) != 1:
13295                 print red("!!! config can only take a single package atom at this time\n")
13296                 sys.exit(1)
13297         if not is_valid_package_atom(myfiles[0]):
13298                 portage.writemsg("!!! '%s' is not a valid package atom.\n" % myfiles[0],
13299                         noiselevel=-1)
13300                 portage.writemsg("!!! Please check ebuild(5) for full details.\n")
13301                 portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
13302                 sys.exit(1)
13303         print
13304         try:
13305                 pkgs = trees[settings["ROOT"]]["vartree"].dbapi.match(myfiles[0])
13306         except portage.exception.AmbiguousPackageName, e:
13307                 # Multiple matches thrown from cpv_expand
13308                 pkgs = e.args[0]
13309         if len(pkgs) == 0:
13310                 print "No packages found.\n"
13311                 sys.exit(0)
13312         elif len(pkgs) > 1:
13313                 if "--ask" in myopts:
13314                         options = []
13315                         print "Please select a package to configure:"
13316                         idx = 0
13317                         for pkg in pkgs:
13318                                 idx += 1
13319                                 options.append(str(idx))
13320                                 print options[-1]+") "+pkg
13321                         print "X) Cancel"
13322                         options.append("X")
13323                         idx = userquery("Selection?", options)
13324                         if idx == "X":
13325                                 sys.exit(0)
13326                         pkg = pkgs[int(idx)-1]
13327                 else:
13328                         print "The following packages available:"
13329                         for pkg in pkgs:
13330                                 print "* "+pkg
13331                         print "\nPlease use a specific atom or the --ask option."
13332                         sys.exit(1)
13333         else:
13334                 pkg = pkgs[0]
13335
13336         print
13337         if "--ask" in myopts:
13338                 if userquery("Ready to configure "+pkg+"?") == "No":
13339                         sys.exit(0)
13340         else:
13341                 print "Configuring pkg..."
13342         print
13343         ebuildpath = trees[settings["ROOT"]]["vartree"].dbapi.findname(pkg)
13344         mysettings = portage.config(clone=settings)
13345         vardb = trees[mysettings["ROOT"]]["vartree"].dbapi
13346         debug = mysettings.get("PORTAGE_DEBUG") == "1"
13347         retval = portage.doebuild(ebuildpath, "config", mysettings["ROOT"],
13348                 mysettings,
13349                 debug=(settings.get("PORTAGE_DEBUG", "") == 1), cleanup=True,
13350                 mydbapi=trees[settings["ROOT"]]["vartree"].dbapi, tree="vartree")
13351         if retval == os.EX_OK:
13352                 portage.doebuild(ebuildpath, "clean", mysettings["ROOT"],
13353                         mysettings, debug=debug, mydbapi=vardb, tree="vartree")
13354         print
13355
13356 def action_info(settings, trees, myopts, myfiles):
13357         print getportageversion(settings["PORTDIR"], settings["ROOT"],
13358                 settings.profile_path, settings["CHOST"],
13359                 trees[settings["ROOT"]]["vartree"].dbapi)
13360         header_width = 65
13361         header_title = "System Settings"
13362         if myfiles:
13363                 print header_width * "="
13364                 print header_title.rjust(int(header_width/2 + len(header_title)/2))
13365         print header_width * "="
13366         print "System uname: "+platform.platform(aliased=1)
13367
13368         lastSync = portage.grabfile(os.path.join(
13369                 settings["PORTDIR"], "metadata", "timestamp.chk"))
13370         print "Timestamp of tree:",
13371         if lastSync:
13372                 print lastSync[0]
13373         else:
13374                 print "Unknown"
13375
13376         output=commands.getstatusoutput("distcc --version")
13377         if not output[0]:
13378                 print str(output[1].split("\n",1)[0]),
13379                 if "distcc" in settings.features:
13380                         print "[enabled]"
13381                 else:
13382                         print "[disabled]"
13383
13384         output=commands.getstatusoutput("ccache -V")
13385         if not output[0]:
13386                 print str(output[1].split("\n",1)[0]),
13387                 if "ccache" in settings.features:
13388                         print "[enabled]"
13389                 else:
13390                         print "[disabled]"
13391
13392         myvars  = ["sys-devel/autoconf", "sys-devel/automake", "virtual/os-headers",
13393                    "sys-devel/binutils", "sys-devel/libtool",  "dev-lang/python"]
13394         myvars += portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_pkgs")
13395         myvars  = portage.util.unique_array(myvars)
13396         myvars.sort()
13397
13398         for x in myvars:
13399                 if portage.isvalidatom(x):
13400                         pkg_matches = trees["/"]["vartree"].dbapi.match(x)
13401                         pkg_matches = [portage.catpkgsplit(cpv)[1:] for cpv in pkg_matches]
13402                         pkg_matches.sort(key=cmp_sort_key(portage.pkgcmp))
13403                         pkgs = []
13404                         for pn, ver, rev in pkg_matches:
13405                                 if rev != "r0":
13406                                         pkgs.append(ver + "-" + rev)
13407                                 else:
13408                                         pkgs.append(ver)
13409                         if pkgs:
13410                                 pkgs = ", ".join(pkgs)
13411                                 print "%-20s %s" % (x+":", pkgs)
13412                 else:
13413                         print "%-20s %s" % (x+":", "[NOT VALID]")
13414
13415         libtool_vers = ",".join(trees["/"]["vartree"].dbapi.match("sys-devel/libtool"))
13416
13417         if "--verbose" in myopts:
13418                 myvars=settings.keys()
13419         else:
13420                 myvars = ['GENTOO_MIRRORS', 'CONFIG_PROTECT', 'CONFIG_PROTECT_MASK',
13421                           'PORTDIR', 'DISTDIR', 'PKGDIR', 'PORTAGE_TMPDIR',
13422                           'PORTDIR_OVERLAY', 'USE', 'CHOST', 'CFLAGS', 'CXXFLAGS',
13423                           'ACCEPT_KEYWORDS', 'SYNC', 'FEATURES', 'EMERGE_DEFAULT_OPTS']
13424
13425                 myvars.extend(portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_vars"))
13426
13427         myvars = portage.util.unique_array(myvars)
13428         unset_vars = []
13429         myvars.sort()
13430         for x in myvars:
13431                 if x in settings:
13432                         if x != "USE":
13433                                 print '%s="%s"' % (x, settings[x])
13434                         else:
13435                                 use = set(settings["USE"].split())
13436                                 use_expand = settings["USE_EXPAND"].split()
13437                                 use_expand.sort()
13438                                 for varname in use_expand:
13439                                         flag_prefix = varname.lower() + "_"
13440                                         for f in list(use):
13441                                                 if f.startswith(flag_prefix):
13442                                                         use.remove(f)
13443                                 use = list(use)
13444                                 use.sort()
13445                                 print 'USE="%s"' % " ".join(use),
13446                                 for varname in use_expand:
13447                                         myval = settings.get(varname)
13448                                         if myval:
13449                                                 print '%s="%s"' % (varname, myval),
13450                                 print
13451                 else:
13452                         unset_vars.append(x)
13453         if unset_vars:
13454                 print "Unset:  "+", ".join(unset_vars)
13455         print
13456
13457         if "--debug" in myopts:
13458                 for x in dir(portage):
13459                         module = getattr(portage, x)
13460                         if "cvs_id_string" in dir(module):
13461                                 print "%s: %s" % (str(x), str(module.cvs_id_string))
13462
13463         # See if we can find any packages installed matching the strings
13464         # passed on the command line
13465         mypkgs = []
13466         vardb = trees[settings["ROOT"]]["vartree"].dbapi
13467         portdb = trees[settings["ROOT"]]["porttree"].dbapi
13468         for x in myfiles:
13469                 mypkgs.extend(vardb.match(x))
13470
13471         # If some packages were found...
13472         if mypkgs:
13473                 # Get our global settings (we only print stuff if it varies from
13474                 # the current config)
13475                 mydesiredvars = [ 'CHOST', 'CFLAGS', 'CXXFLAGS', 'LDFLAGS' ]
13476                 auxkeys = mydesiredvars + [ "USE", "IUSE"]
13477                 global_vals = {}
13478                 pkgsettings = portage.config(clone=settings)
13479
13480                 for myvar in mydesiredvars:
13481                         global_vals[myvar] = set(settings.get(myvar, "").split())
13482
13483                 # Loop through each package
13484                 # Only print settings if they differ from global settings
13485                 header_title = "Package Settings"
13486                 print header_width * "="
13487                 print header_title.rjust(int(header_width/2 + len(header_title)/2))
13488                 print header_width * "="
13489                 from portage.output import EOutput
13490                 out = EOutput()
13491                 for pkg in mypkgs:
13492                         # Get all package specific variables
13493                         auxvalues = vardb.aux_get(pkg, auxkeys)
13494                         valuesmap = {}
13495                         for i in xrange(len(auxkeys)):
13496                                 valuesmap[auxkeys[i]] = set(auxvalues[i].split())
13497                         diff_values = {}
13498                         for myvar in mydesiredvars:
13499                                 # If the package variable doesn't match the
13500                                 # current global variable, something has changed
13501                                 # so set diff_found so we know to print
13502                                 if valuesmap[myvar] != global_vals[myvar]:
13503                                         diff_values[myvar] = valuesmap[myvar]
13504                         valuesmap["IUSE"] = set(filter_iuse_defaults(valuesmap["IUSE"]))
13505                         valuesmap["USE"] = valuesmap["USE"].intersection(valuesmap["IUSE"])
13506                         pkgsettings.reset()
13507                         # If a matching ebuild is no longer available in the tree, maybe it
13508                         # would make sense to compare against the flags for the best
13509                         # available version with the same slot?
13510                         mydb = None
13511                         if portdb.cpv_exists(pkg):
13512                                 mydb = portdb
13513                         pkgsettings.setcpv(pkg, mydb=mydb)
13514                         if valuesmap["IUSE"].intersection(
13515                                 pkgsettings["PORTAGE_USE"].split()) != valuesmap["USE"]:
13516                                 diff_values["USE"] = valuesmap["USE"]
13517                         # If a difference was found, print the info for
13518                         # this package.
13519                         if diff_values:
13520                                 # Print package info
13521                                 print "%s was built with the following:" % pkg
13522                                 for myvar in mydesiredvars + ["USE"]:
13523                                         if myvar in diff_values:
13524                                                 mylist = list(diff_values[myvar])
13525                                                 mylist.sort()
13526                                                 print "%s=\"%s\"" % (myvar, " ".join(mylist))
13527                                 print
13528                         print ">>> Attempting to run pkg_info() for '%s'" % pkg
13529                         ebuildpath = vardb.findname(pkg)
13530                         if not ebuildpath or not os.path.exists(ebuildpath):
13531                                 out.ewarn("No ebuild found for '%s'" % pkg)
13532                                 continue
13533                         portage.doebuild(ebuildpath, "info", pkgsettings["ROOT"],
13534                                 pkgsettings, debug=(settings.get("PORTAGE_DEBUG", "") == 1),
13535                                 mydbapi=trees[settings["ROOT"]]["vartree"].dbapi,
13536                                 tree="vartree")
13537
13538 def action_search(root_config, myopts, myfiles, spinner):
13539         if not myfiles:
13540                 print "emerge: no search terms provided."
13541         else:
13542                 searchinstance = search(root_config,
13543                         spinner, "--searchdesc" in myopts,
13544                         "--quiet" not in myopts, "--usepkg" in myopts,
13545                         "--usepkgonly" in myopts)
13546                 for mysearch in myfiles:
13547                         try:
13548                                 searchinstance.execute(mysearch)
13549                         except re.error, comment:
13550                                 print "\n!!! Regular expression error in \"%s\": %s" % ( mysearch, comment )
13551                                 sys.exit(1)
13552                         searchinstance.output()
13553
13554 def action_depclean(settings, trees, ldpath_mtimes,
13555         myopts, action, myfiles, spinner):
13556         # Kill packages that aren't explicitly merged or are required as a
13557         # dependency of another package. World file is explicit.
13558
13559         # Global depclean or prune operations are not very safe when there are
13560         # missing dependencies since it's unknown how badly incomplete
13561         # the dependency graph is, and we might accidentally remove packages
13562         # that should have been pulled into the graph. On the other hand, it's
13563         # relatively safe to ignore missing deps when only asked to remove
13564         # specific packages.
13565         allow_missing_deps = len(myfiles) > 0
13566
13567         msg = []
13568         msg.append("Always study the list of packages to be cleaned for any obvious\n")
13569         msg.append("mistakes. Packages that are part of the world set will always\n")
13570         msg.append("be kept.  They can be manually added to this set with\n")
13571         msg.append(good("`emerge --noreplace <atom>`") + ".  Packages that are listed in\n")
13572         msg.append("package.provided (see portage(5)) will be removed by\n")
13573         msg.append("depclean, even if they are part of the world set.\n")
13574         msg.append("\n")
13575         msg.append("As a safety measure, depclean will not remove any packages\n")
13576         msg.append("unless *all* required dependencies have been resolved.  As a\n")
13577         msg.append("consequence, it is often necessary to run %s\n" % \
13578                 good("`emerge --update"))
13579         msg.append(good("--newuse --deep @system @world`") + \
13580                 " prior to depclean.\n")
13581
13582         if action == "depclean" and "--quiet" not in myopts and not myfiles:
13583                 portage.writemsg_stdout("\n")
13584                 for x in msg:
13585                         portage.writemsg_stdout(colorize("WARN", " * ") + x)
13586
13587         xterm_titles = "notitles" not in settings.features
13588         myroot = settings["ROOT"]
13589         root_config = trees[myroot]["root_config"]
13590         getSetAtoms = root_config.setconfig.getSetAtoms
13591         vardb = trees[myroot]["vartree"].dbapi
13592
13593         required_set_names = ("system", "world")
13594         required_sets = {}
13595         set_args = []
13596
13597         for s in required_set_names:
13598                 required_sets[s] = InternalPackageSet(
13599                         initial_atoms=getSetAtoms(s))
13600
13601         
13602         # When removing packages, use a temporary version of world
13603         # which excludes packages that are intended to be eligible for
13604         # removal.
13605         world_temp_set = required_sets["world"]
13606         system_set = required_sets["system"]
13607
13608         if not system_set or not world_temp_set:
13609
13610                 if not system_set:
13611                         writemsg_level("!!! You have no system list.\n",
13612                                 level=logging.ERROR, noiselevel=-1)
13613
13614                 if not world_temp_set:
13615                         writemsg_level("!!! You have no world file.\n",
13616                                         level=logging.WARNING, noiselevel=-1)
13617
13618                 writemsg_level("!!! Proceeding is likely to " + \
13619                         "break your installation.\n",
13620                         level=logging.WARNING, noiselevel=-1)
13621                 if "--pretend" not in myopts:
13622                         countdown(int(settings["EMERGE_WARNING_DELAY"]), ">>> Depclean")
13623
13624         if action == "depclean":
13625                 emergelog(xterm_titles, " >>> depclean")
13626
13627         import textwrap
13628         args_set = InternalPackageSet()
13629         if myfiles:
13630                 for x in myfiles:
13631                         if not is_valid_package_atom(x):
13632                                 writemsg_level("!!! '%s' is not a valid package atom.\n" % x,
13633                                         level=logging.ERROR, noiselevel=-1)
13634                                 writemsg_level("!!! Please check ebuild(5) for full details.\n")
13635                                 return
13636                         try:
13637                                 atom = portage.dep_expand(x, mydb=vardb, settings=settings)
13638                         except portage.exception.AmbiguousPackageName, e:
13639                                 msg = "The short ebuild name \"" + x + \
13640                                         "\" is ambiguous.  Please specify " + \
13641                                         "one of the following " + \
13642                                         "fully-qualified ebuild names instead:"
13643                                 for line in textwrap.wrap(msg, 70):
13644                                         writemsg_level("!!! %s\n" % (line,),
13645                                                 level=logging.ERROR, noiselevel=-1)
13646                                 for i in e[0]:
13647                                         writemsg_level("    %s\n" % colorize("INFORM", i),
13648                                                 level=logging.ERROR, noiselevel=-1)
13649                                 writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
13650                                 return
13651                         args_set.add(atom)
13652                 matched_packages = False
13653                 for x in args_set:
13654                         if vardb.match(x):
13655                                 matched_packages = True
13656                                 break
13657                 if not matched_packages:
13658                         writemsg_level(">>> No packages selected for removal by %s\n" % \
13659                                 action)
13660                         return
13661
13662         writemsg_level("\nCalculating dependencies  ")
13663         resolver_params = create_depgraph_params(myopts, "remove")
13664         resolver = depgraph(settings, trees, myopts, resolver_params, spinner)
13665         vardb = resolver.trees[myroot]["vartree"].dbapi
13666
13667         if action == "depclean":
13668
13669                 if args_set:
13670                         # Pull in everything that's installed but not matched
13671                         # by an argument atom since we don't want to clean any
13672                         # package if something depends on it.
13673
13674                         world_temp_set.clear()
13675                         for pkg in vardb:
13676                                 spinner.update()
13677
13678                                 try:
13679                                         if args_set.findAtomForPackage(pkg) is None:
13680                                                 world_temp_set.add("=" + pkg.cpv)
13681                                                 continue
13682                                 except portage.exception.InvalidDependString, e:
13683                                         show_invalid_depstring_notice(pkg,
13684                                                 pkg.metadata["PROVIDE"], str(e))
13685                                         del e
13686                                         world_temp_set.add("=" + pkg.cpv)
13687                                         continue
13688
13689         elif action == "prune":
13690
13691                 # Pull in everything that's installed since we don't
13692                 # to prune a package if something depends on it.
13693                 world_temp_set.clear()
13694                 world_temp_set.update(vardb.cp_all())
13695
13696                 if not args_set:
13697
13698                         # Try to prune everything that's slotted.
13699                         for cp in vardb.cp_all():
13700                                 if len(vardb.cp_list(cp)) > 1:
13701                                         args_set.add(cp)
13702
13703                 # Remove atoms from world that match installed packages
13704                 # that are also matched by argument atoms, but do not remove
13705                 # them if they match the highest installed version.
13706                 for pkg in vardb:
13707                         spinner.update()
13708                         pkgs_for_cp = vardb.match_pkgs(pkg.cp)
13709                         if not pkgs_for_cp or pkg not in pkgs_for_cp:
13710                                 raise AssertionError("package expected in matches: " + \
13711                                         "cp = %s, cpv = %s matches = %s" % \
13712                                         (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
13713
13714                         highest_version = pkgs_for_cp[-1]
13715                         if pkg == highest_version:
13716                                 # pkg is the highest version
13717                                 world_temp_set.add("=" + pkg.cpv)
13718                                 continue
13719
13720                         if len(pkgs_for_cp) <= 1:
13721                                 raise AssertionError("more packages expected: " + \
13722                                         "cp = %s, cpv = %s matches = %s" % \
13723                                         (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
13724
13725                         try:
13726                                 if args_set.findAtomForPackage(pkg) is None:
13727                                         world_temp_set.add("=" + pkg.cpv)
13728                                         continue
13729                         except portage.exception.InvalidDependString, e:
13730                                 show_invalid_depstring_notice(pkg,
13731                                         pkg.metadata["PROVIDE"], str(e))
13732                                 del e
13733                                 world_temp_set.add("=" + pkg.cpv)
13734                                 continue
13735
13736         set_args = {}
13737         for s, package_set in required_sets.iteritems():
13738                 set_atom = SETPREFIX + s
13739                 set_arg = SetArg(arg=set_atom, set=package_set,
13740                         root_config=resolver.roots[myroot])
13741                 set_args[s] = set_arg
13742                 for atom in set_arg.set:
13743                         resolver._dep_stack.append(
13744                                 Dependency(atom=atom, root=myroot, parent=set_arg))
13745                         resolver.digraph.add(set_arg, None)
13746
13747         success = resolver._complete_graph()
13748         writemsg_level("\b\b... done!\n")
13749
13750         resolver.display_problems()
13751
13752         if not success:
13753                 return 1
13754
13755         def unresolved_deps():
13756
13757                 unresolvable = set()
13758                 for dep in resolver._initially_unsatisfied_deps:
13759                         if isinstance(dep.parent, Package) and \
13760                                 (dep.priority > UnmergeDepPriority.SOFT):
13761                                 unresolvable.add((dep.atom, dep.parent.cpv))
13762
13763                 if not unresolvable:
13764                         return False
13765
13766                 if unresolvable and not allow_missing_deps:
13767                         prefix = bad(" * ")
13768                         msg = []
13769                         msg.append("Dependencies could not be completely resolved due to")
13770                         msg.append("the following required packages not being installed:")
13771                         msg.append("")
13772                         for atom, parent in unresolvable:
13773                                 msg.append("  %s pulled in by:" % (atom,))
13774                                 msg.append("    %s" % (parent,))
13775                                 msg.append("")
13776                         msg.append("Have you forgotten to run " + \
13777                                 good("`emerge --update --newuse --deep @system @world`") + " prior")
13778                         msg.append(("to %s? It may be necessary to manually " + \
13779                                 "uninstall packages that no longer") % action)
13780                         msg.append("exist in the portage tree since " + \
13781                                 "it may not be possible to satisfy their")
13782                         msg.append("dependencies.  Also, be aware of " + \
13783                                 "the --with-bdeps option that is documented")
13784                         msg.append("in " + good("`man emerge`") + ".")
13785                         if action == "prune":
13786                                 msg.append("")
13787                                 msg.append("If you would like to ignore " + \
13788                                         "dependencies then use %s." % good("--nodeps"))
13789                         writemsg_level("".join("%s%s\n" % (prefix, line) for line in msg),
13790                                 level=logging.ERROR, noiselevel=-1)
13791                         return True
13792                 return False
13793
13794         if unresolved_deps():
13795                 return 1
13796
13797         graph = resolver.digraph.copy()
13798         required_pkgs_total = 0
13799         for node in graph:
13800                 if isinstance(node, Package):
13801                         required_pkgs_total += 1
13802
13803         def show_parents(child_node):
13804                 parent_nodes = graph.parent_nodes(child_node)
13805                 if not parent_nodes:
13806                         # With --prune, the highest version can be pulled in without any
13807                         # real parent since all installed packages are pulled in.  In that
13808                         # case there's nothing to show here.
13809                         return
13810                 parent_strs = []
13811                 for node in parent_nodes:
13812                         parent_strs.append(str(getattr(node, "cpv", node)))
13813                 parent_strs.sort()
13814                 msg = []
13815                 msg.append("  %s pulled in by:\n" % (child_node.cpv,))
13816                 for parent_str in parent_strs:
13817                         msg.append("    %s\n" % (parent_str,))
13818                 msg.append("\n")
13819                 portage.writemsg_stdout("".join(msg), noiselevel=-1)
13820
13821         def cmp_pkg_cpv(pkg1, pkg2):
13822                 """Sort Package instances by cpv."""
13823                 if pkg1.cpv > pkg2.cpv:
13824                         return 1
13825                 elif pkg1.cpv == pkg2.cpv:
13826                         return 0
13827                 else:
13828                         return -1
13829
13830         def create_cleanlist():
13831                 pkgs_to_remove = []
13832
13833                 if action == "depclean":
13834                         if args_set:
13835
13836                                 for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)):
13837                                         arg_atom = None
13838                                         try:
13839                                                 arg_atom = args_set.findAtomForPackage(pkg)
13840                                         except portage.exception.InvalidDependString:
13841                                                 # this error has already been displayed by now
13842                                                 continue
13843
13844                                         if arg_atom:
13845                                                 if pkg not in graph:
13846                                                         pkgs_to_remove.append(pkg)
13847                                                 elif "--verbose" in myopts:
13848                                                         show_parents(pkg)
13849
13850                         else:
13851                                 for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)):
13852                                         if pkg not in graph:
13853                                                 pkgs_to_remove.append(pkg)
13854                                         elif "--verbose" in myopts:
13855                                                 show_parents(pkg)
13856
13857                 elif action == "prune":
13858                         # Prune really uses all installed instead of world. It's not
13859                         # a real reverse dependency so don't display it as such.
13860                         graph.remove(set_args["world"])
13861
13862                         for atom in args_set:
13863                                 for pkg in vardb.match_pkgs(atom):
13864                                         if pkg not in graph:
13865                                                 pkgs_to_remove.append(pkg)
13866                                         elif "--verbose" in myopts:
13867                                                 show_parents(pkg)
13868
13869                 if not pkgs_to_remove:
13870                         writemsg_level(
13871                                 ">>> No packages selected for removal by %s\n" % action)
13872                         if "--verbose" not in myopts:
13873                                 writemsg_level(
13874                                         ">>> To see reverse dependencies, use %s\n" % \
13875                                                 good("--verbose"))
13876                         if action == "prune":
13877                                 writemsg_level(
13878                                         ">>> To ignore dependencies, use %s\n" % \
13879                                                 good("--nodeps"))
13880
13881                 return pkgs_to_remove
13882
13883         cleanlist = create_cleanlist()
13884
13885         if len(cleanlist):
13886                 clean_set = set(cleanlist)
13887
13888                 # Check if any of these package are the sole providers of libraries
13889                 # with consumers that have not been selected for removal. If so, these
13890                 # packages and any dependencies need to be added to the graph.
13891                 real_vardb = trees[myroot]["vartree"].dbapi
13892                 linkmap = real_vardb.linkmap
13893                 liblist = linkmap.listLibraryObjects()
13894                 consumer_cache = {}
13895                 provider_cache = {}
13896                 soname_cache = {}
13897                 consumer_map = {}
13898
13899                 writemsg_level(">>> Checking for lib consumers...\n")
13900
13901                 for pkg in cleanlist:
13902                         pkg_dblink = real_vardb._dblink(pkg.cpv)
13903                         provided_libs = set()
13904
13905                         for lib in liblist:
13906                                 if pkg_dblink.isowner(lib, myroot):
13907                                         provided_libs.add(lib)
13908
13909                         if not provided_libs:
13910                                 continue
13911
13912                         consumers = {}
13913                         for lib in provided_libs:
13914                                 lib_consumers = consumer_cache.get(lib)
13915                                 if lib_consumers is None:
13916                                         lib_consumers = linkmap.findConsumers(lib)
13917                                         consumer_cache[lib] = lib_consumers
13918                                 if lib_consumers:
13919                                         consumers[lib] = lib_consumers
13920
13921                         if not consumers:
13922                                 continue
13923
13924                         for lib, lib_consumers in consumers.items():
13925                                 for consumer_file in list(lib_consumers):
13926                                         if pkg_dblink.isowner(consumer_file, myroot):
13927                                                 lib_consumers.remove(consumer_file)
13928                                 if not lib_consumers:
13929                                         del consumers[lib]
13930
13931                         if not consumers:
13932                                 continue
13933
13934                         for lib, lib_consumers in consumers.iteritems():
13935
13936                                 soname = soname_cache.get(lib)
13937                                 if soname is None:
13938                                         soname = linkmap.getSoname(lib)
13939                                         soname_cache[lib] = soname
13940
13941                                 consumer_providers = []
13942                                 for lib_consumer in lib_consumers:
13943                                         providers = provider_cache.get(lib)
13944                                         if providers is None:
13945                                                 providers = linkmap.findProviders(lib_consumer)
13946                                                 provider_cache[lib_consumer] = providers
13947                                         if soname not in providers:
13948                                                 # Why does this happen?
13949                                                 continue
13950                                         consumer_providers.append(
13951                                                 (lib_consumer, providers[soname]))
13952
13953                                 consumers[lib] = consumer_providers
13954
13955                         consumer_map[pkg] = consumers
13956
13957                 if consumer_map:
13958
13959                         search_files = set()
13960                         for consumers in consumer_map.itervalues():
13961                                 for lib, consumer_providers in consumers.iteritems():
13962                                         for lib_consumer, providers in consumer_providers:
13963                                                 search_files.add(lib_consumer)
13964                                                 search_files.update(providers)
13965
13966                         writemsg_level(">>> Assigning files to packages...\n")
13967                         file_owners = real_vardb._owners.getFileOwnerMap(search_files)
13968
13969                         for pkg, consumers in consumer_map.items():
13970                                 for lib, consumer_providers in consumers.items():
13971                                         lib_consumers = set()
13972
13973                                         for lib_consumer, providers in consumer_providers:
13974                                                 owner_set = file_owners.get(lib_consumer)
13975                                                 provider_dblinks = set()
13976                                                 provider_pkgs = set()
13977
13978                                                 if len(providers) > 1:
13979                                                         for provider in providers:
13980                                                                 provider_set = file_owners.get(provider)
13981                                                                 if provider_set is not None:
13982                                                                         provider_dblinks.update(provider_set)
13983
13984                                                 if len(provider_dblinks) > 1:
13985                                                         for provider_dblink in provider_dblinks:
13986                                                                 pkg_key = ("installed", myroot,
13987                                                                         provider_dblink.mycpv, "nomerge")
13988                                                                 if pkg_key not in clean_set:
13989                                                                         provider_pkgs.add(vardb.get(pkg_key))
13990
13991                                                 if provider_pkgs:
13992                                                         continue
13993
13994                                                 if owner_set is not None:
13995                                                         lib_consumers.update(owner_set)
13996
13997                                         for consumer_dblink in list(lib_consumers):
13998                                                 if ("installed", myroot, consumer_dblink.mycpv,
13999                                                         "nomerge") in clean_set:
14000                                                         lib_consumers.remove(consumer_dblink)
14001                                                         continue
14002
14003                                         if lib_consumers:
14004                                                 consumers[lib] = lib_consumers
14005                                         else:
14006                                                 del consumers[lib]
14007                                 if not consumers:
14008                                         del consumer_map[pkg]
14009
14010                 if consumer_map:
14011                         # TODO: Implement a package set for rebuilding consumer packages.
14012
14013                         msg = "In order to avoid breakage of link level " + \
14014                                 "dependencies, one or more packages will not be removed. " + \
14015                                 "This can be solved by rebuilding " + \
14016                                 "the packages that pulled them in."
14017
14018                         prefix = bad(" * ")
14019                         from textwrap import wrap
14020                         writemsg_level("".join(prefix + "%s\n" % line for \
14021                                 line in wrap(msg, 70)), level=logging.WARNING, noiselevel=-1)
14022
14023                         msg = []
14024                         for pkg, consumers in consumer_map.iteritems():
14025                                 unique_consumers = set(chain(*consumers.values()))
14026                                 unique_consumers = sorted(consumer.mycpv \
14027                                         for consumer in unique_consumers)
14028                                 msg.append("")
14029                                 msg.append("  %s pulled in by:" % (pkg.cpv,))
14030                                 for consumer in unique_consumers:
14031                                         msg.append("    %s" % (consumer,))
14032                         msg.append("")
14033                         writemsg_level("".join(prefix + "%s\n" % line for line in msg),
14034                                 level=logging.WARNING, noiselevel=-1)
14035
14036                         # Add lib providers to the graph as children of lib consumers,
14037                         # and also add any dependencies pulled in by the provider.
14038                         writemsg_level(">>> Adding lib providers to graph...\n")
14039
14040                         for pkg, consumers in consumer_map.iteritems():
14041                                 for consumer_dblink in set(chain(*consumers.values())):
14042                                         consumer_pkg = vardb.get(("installed", myroot,
14043                                                 consumer_dblink.mycpv, "nomerge"))
14044                                         if not resolver._add_pkg(pkg,
14045                                                 Dependency(parent=consumer_pkg,
14046                                                 priority=UnmergeDepPriority(runtime=True),
14047                                                 root=pkg.root)):
14048                                                 resolver.display_problems()
14049                                                 return 1
14050
14051                         writemsg_level("\nCalculating dependencies  ")
14052                         success = resolver._complete_graph()
14053                         writemsg_level("\b\b... done!\n")
14054                         resolver.display_problems()
14055                         if not success:
14056                                 return 1
14057                         if unresolved_deps():
14058                                 return 1
14059
14060                         graph = resolver.digraph.copy()
14061                         required_pkgs_total = 0
14062                         for node in graph:
14063                                 if isinstance(node, Package):
14064                                         required_pkgs_total += 1
14065                         cleanlist = create_cleanlist()
14066                         if not cleanlist:
14067                                 return 0
14068                         clean_set = set(cleanlist)
14069
14070                 # Use a topological sort to create an unmerge order such that
14071                 # each package is unmerged before it's dependencies. This is
14072                 # necessary to avoid breaking things that may need to run
14073                 # during pkg_prerm or pkg_postrm phases.
14074
14075                 # Create a new graph to account for dependencies between the
14076                 # packages being unmerged.
14077                 graph = digraph()
14078                 del cleanlist[:]
14079
14080                 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
14081                 runtime = UnmergeDepPriority(runtime=True)
14082                 runtime_post = UnmergeDepPriority(runtime_post=True)
14083                 buildtime = UnmergeDepPriority(buildtime=True)
14084                 priority_map = {
14085                         "RDEPEND": runtime,
14086                         "PDEPEND": runtime_post,
14087                         "DEPEND": buildtime,
14088                 }
14089
14090                 for node in clean_set:
14091                         graph.add(node, None)
14092                         mydeps = []
14093                         node_use = node.metadata["USE"].split()
14094                         for dep_type in dep_keys:
14095                                 depstr = node.metadata[dep_type]
14096                                 if not depstr:
14097                                         continue
14098                                 try:
14099                                         portage.dep._dep_check_strict = False
14100                                         success, atoms = portage.dep_check(depstr, None, settings,
14101                                                 myuse=node_use, trees=resolver._graph_trees,
14102                                                 myroot=myroot)
14103                                 finally:
14104                                         portage.dep._dep_check_strict = True
14105                                 if not success:
14106                                         # Ignore invalid deps of packages that will
14107                                         # be uninstalled anyway.
14108                                         continue
14109
14110                                 priority = priority_map[dep_type]
14111                                 for atom in atoms:
14112                                         if not isinstance(atom, portage.dep.Atom):
14113                                                 # Ignore invalid atoms returned from dep_check().
14114                                                 continue
14115                                         if atom.blocker:
14116                                                 continue
14117                                         matches = vardb.match_pkgs(atom)
14118                                         if not matches:
14119                                                 continue
14120                                         for child_node in matches:
14121                                                 if child_node in clean_set:
14122                                                         graph.add(child_node, node, priority=priority)
14123
14124                 ordered = True
14125                 if len(graph.order) == len(graph.root_nodes()):
14126                         # If there are no dependencies between packages
14127                         # let unmerge() group them by cat/pn.
14128                         ordered = False
14129                         cleanlist = [pkg.cpv for pkg in graph.order]
14130                 else:
14131                         # Order nodes from lowest to highest overall reference count for
14132                         # optimal root node selection.
14133                         node_refcounts = {}
14134                         for node in graph.order:
14135                                 node_refcounts[node] = len(graph.parent_nodes(node))
14136                         def cmp_reference_count(node1, node2):
14137                                 return node_refcounts[node1] - node_refcounts[node2]
14138                         graph.order.sort(key=cmp_sort_key(cmp_reference_count))
14139         
14140                         ignore_priority_range = [None]
14141                         ignore_priority_range.extend(
14142                                 xrange(UnmergeDepPriority.MIN, UnmergeDepPriority.MAX + 1))
14143                         while not graph.empty():
14144                                 for ignore_priority in ignore_priority_range:
14145                                         nodes = graph.root_nodes(ignore_priority=ignore_priority)
14146                                         if nodes:
14147                                                 break
14148                                 if not nodes:
14149                                         raise AssertionError("no root nodes")
14150                                 if ignore_priority is not None:
14151                                         # Some deps have been dropped due to circular dependencies,
14152                                         # so only pop one node in order do minimize the number that
14153                                         # are dropped.
14154                                         del nodes[1:]
14155                                 for node in nodes:
14156                                         graph.remove(node)
14157                                         cleanlist.append(node.cpv)
14158
14159                 unmerge(root_config, myopts, "unmerge", cleanlist,
14160                         ldpath_mtimes, ordered=ordered)
14161
14162         if action == "prune":
14163                 return
14164
14165         if not cleanlist and "--quiet" in myopts:
14166                 return
14167
14168         print "Packages installed:   "+str(len(vardb.cpv_all()))
14169         print "Packages in world:    " + \
14170                 str(len(root_config.sets["world"].getAtoms()))
14171         print "Packages in system:   " + \
14172                 str(len(root_config.sets["system"].getAtoms()))
14173         print "Required packages:    "+str(required_pkgs_total)
14174         if "--pretend" in myopts:
14175                 print "Number to remove:     "+str(len(cleanlist))
14176         else:
14177                 print "Number removed:       "+str(len(cleanlist))
14178
14179 def resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
14180         """
14181         Construct a depgraph for the given resume list. This will raise
14182         PackageNotFound or depgraph.UnsatisfiedResumeDep when necessary.
14183         @rtype: tuple
14184         @returns: (success, depgraph, dropped_tasks)
14185         """
14186         skip_masked = True
14187         skip_unsatisfied = True
14188         mergelist = mtimedb["resume"]["mergelist"]
14189         dropped_tasks = set()
14190         while True:
14191                 mydepgraph = depgraph(settings, trees,
14192                         myopts, myparams, spinner)
14193                 try:
14194                         success = mydepgraph.loadResumeCommand(mtimedb["resume"],
14195                                 skip_masked=skip_masked)
14196                 except depgraph.UnsatisfiedResumeDep, e:
14197                         if not skip_unsatisfied:
14198                                 raise
14199
14200                         graph = mydepgraph.digraph
14201                         unsatisfied_parents = dict((dep.parent, dep.parent) \
14202                                 for dep in e.value)
14203                         traversed_nodes = set()
14204                         unsatisfied_stack = list(unsatisfied_parents)
14205                         while unsatisfied_stack:
14206                                 pkg = unsatisfied_stack.pop()
14207                                 if pkg in traversed_nodes:
14208                                         continue
14209                                 traversed_nodes.add(pkg)
14210
14211                                 # If this package was pulled in by a parent
14212                                 # package scheduled for merge, removing this
14213                                 # package may cause the the parent package's
14214                                 # dependency to become unsatisfied.
14215                                 for parent_node in graph.parent_nodes(pkg):
14216                                         if not isinstance(parent_node, Package) \
14217                                                 or parent_node.operation not in ("merge", "nomerge"):
14218                                                 continue
14219                                         unsatisfied = \
14220                                                 graph.child_nodes(parent_node,
14221                                                 ignore_priority=DepPrioritySatisfiedRange.ignore_soft)
14222                                         if pkg in unsatisfied:
14223                                                 unsatisfied_parents[parent_node] = parent_node
14224                                                 unsatisfied_stack.append(parent_node)
14225
14226                         pruned_mergelist = []
14227                         for x in mergelist:
14228                                 if isinstance(x, list) and \
14229                                         tuple(x) not in unsatisfied_parents:
14230                                         pruned_mergelist.append(x)
14231
14232                         # If the mergelist doesn't shrink then this loop is infinite.
14233                         if len(pruned_mergelist) == len(mergelist):
14234                                 # This happens if a package can't be dropped because
14235                                 # it's already installed, but it has unsatisfied PDEPEND.
14236                                 raise
14237                         mergelist[:] = pruned_mergelist
14238
14239                         # Exclude installed packages that have been removed from the graph due
14240                         # to failure to build/install runtime dependencies after the dependent
14241                         # package has already been installed.
14242                         dropped_tasks.update(pkg for pkg in \
14243                                 unsatisfied_parents if pkg.operation != "nomerge")
14244                         mydepgraph.break_refs(unsatisfied_parents)
14245
14246                         del e, graph, traversed_nodes, \
14247                                 unsatisfied_parents, unsatisfied_stack
14248                         continue
14249                 else:
14250                         break
14251         return (success, mydepgraph, dropped_tasks)
14252
14253 def action_build(settings, trees, mtimedb,
14254         myopts, myaction, myfiles, spinner):
14255
14256         # validate the state of the resume data
14257         # so that we can make assumptions later.
14258         for k in ("resume", "resume_backup"):
14259                 if k not in mtimedb:
14260                         continue
14261                 resume_data = mtimedb[k]
14262                 if not isinstance(resume_data, dict):
14263                         del mtimedb[k]
14264                         continue
14265                 mergelist = resume_data.get("mergelist")
14266                 if not isinstance(mergelist, list):
14267                         del mtimedb[k]
14268                         continue
14269                 for x in mergelist:
14270                         if not (isinstance(x, list) and len(x) == 4):
14271                                 continue
14272                         pkg_type, pkg_root, pkg_key, pkg_action = x
14273                         if pkg_root not in trees:
14274                                 # Current $ROOT setting differs,
14275                                 # so the list must be stale.
14276                                 mergelist = None
14277                                 break
14278                 if not mergelist:
14279                         del mtimedb[k]
14280                         continue
14281                 resume_opts = resume_data.get("myopts")
14282                 if not isinstance(resume_opts, (dict, list)):
14283                         del mtimedb[k]
14284                         continue
14285                 favorites = resume_data.get("favorites")
14286                 if not isinstance(favorites, list):
14287                         del mtimedb[k]
14288                         continue
14289
14290         resume = False
14291         if "--resume" in myopts and \
14292                 ("resume" in mtimedb or
14293                 "resume_backup" in mtimedb):
14294                 resume = True
14295                 if "resume" not in mtimedb:
14296                         mtimedb["resume"] = mtimedb["resume_backup"]
14297                         del mtimedb["resume_backup"]
14298                         mtimedb.commit()
14299                 # "myopts" is a list for backward compatibility.
14300                 resume_opts = mtimedb["resume"].get("myopts", [])
14301                 if isinstance(resume_opts, list):
14302                         resume_opts = dict((k,True) for k in resume_opts)
14303                 for opt in ("--ask", "--color", "--skipfirst", "--tree"):
14304                         resume_opts.pop(opt, None)
14305                 myopts.update(resume_opts)
14306
14307                 if "--debug" in myopts:
14308                         writemsg_level("myopts %s\n" % (myopts,))
14309
14310                 # Adjust config according to options of the command being resumed.
14311                 for myroot in trees:
14312                         mysettings =  trees[myroot]["vartree"].settings
14313                         mysettings.unlock()
14314                         adjust_config(myopts, mysettings)
14315                         mysettings.lock()
14316                         del myroot, mysettings
14317
14318         ldpath_mtimes = mtimedb["ldpath"]
14319         favorites=[]
14320         merge_count = 0
14321         buildpkgonly = "--buildpkgonly" in myopts
14322         pretend = "--pretend" in myopts
14323         fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
14324         ask = "--ask" in myopts
14325         nodeps = "--nodeps" in myopts
14326         oneshot = "--oneshot" in myopts or "--onlydeps" in myopts
14327         tree = "--tree" in myopts
14328         if nodeps and tree:
14329                 tree = False
14330                 del myopts["--tree"]
14331                 portage.writemsg(colorize("WARN", " * ") + \
14332                         "--tree is broken with --nodeps. Disabling...\n")
14333         debug = "--debug" in myopts
14334         verbose = "--verbose" in myopts
14335         quiet = "--quiet" in myopts
14336         if pretend or fetchonly:
14337                 # make the mtimedb readonly
14338                 mtimedb.filename = None
14339         if '--digest' in myopts or 'digest' in settings.features:
14340                 if '--digest' in myopts:
14341                         msg = "The --digest option"
14342                 else:
14343                         msg = "The FEATURES=digest setting"
14344
14345                 msg += " can prevent corruption from being" + \
14346                         " noticed. The `repoman manifest` command is the preferred" + \
14347                         " way to generate manifests and it is capable of doing an" + \
14348                         " entire repository or category at once."
14349                 prefix = bad(" * ")
14350                 writemsg(prefix + "\n")
14351                 from textwrap import wrap
14352                 for line in wrap(msg, 72):
14353                         writemsg("%s%s\n" % (prefix, line))
14354                 writemsg(prefix + "\n")
14355
14356         if "--quiet" not in myopts and \
14357                 ("--pretend" in myopts or "--ask" in myopts or \
14358                 "--tree" in myopts or "--verbose" in myopts):
14359                 action = ""
14360                 if "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
14361                         action = "fetched"
14362                 elif "--buildpkgonly" in myopts:
14363                         action = "built"
14364                 else:
14365                         action = "merged"
14366                 if "--tree" in myopts and action != "fetched": # Tree doesn't work with fetching
14367                         print
14368                         print darkgreen("These are the packages that would be %s, in reverse order:") % action
14369                         print
14370                 else:
14371                         print
14372                         print darkgreen("These are the packages that would be %s, in order:") % action
14373                         print
14374
14375         show_spinner = "--quiet" not in myopts and "--nodeps" not in myopts
14376         if not show_spinner:
14377                 spinner.update = spinner.update_quiet
14378
14379         if resume:
14380                 favorites = mtimedb["resume"].get("favorites")
14381                 if not isinstance(favorites, list):
14382                         favorites = []
14383
14384                 if show_spinner:
14385                         print "Calculating dependencies  ",
14386                 myparams = create_depgraph_params(myopts, myaction)
14387
14388                 resume_data = mtimedb["resume"]
14389                 mergelist = resume_data["mergelist"]
14390                 if mergelist and "--skipfirst" in myopts:
14391                         for i, task in enumerate(mergelist):
14392                                 if isinstance(task, list) and \
14393                                         task and task[-1] == "merge":
14394                                         del mergelist[i]
14395                                         break
14396
14397                 success = False
14398                 mydepgraph = None
14399                 try:
14400                         success, mydepgraph, dropped_tasks = resume_depgraph(
14401                                 settings, trees, mtimedb, myopts, myparams, spinner)
14402                 except (portage.exception.PackageNotFound,
14403                         depgraph.UnsatisfiedResumeDep), e:
14404                         if isinstance(e, depgraph.UnsatisfiedResumeDep):
14405                                 mydepgraph = e.depgraph
14406                         if show_spinner:
14407                                 print
14408                         from textwrap import wrap
14409                         from portage.output import EOutput
14410                         out = EOutput()
14411
14412                         resume_data = mtimedb["resume"]
14413                         mergelist = resume_data.get("mergelist")
14414                         if not isinstance(mergelist, list):
14415                                 mergelist = []
14416                         if mergelist and debug or (verbose and not quiet):
14417                                 out.eerror("Invalid resume list:")
14418                                 out.eerror("")
14419                                 indent = "  "
14420                                 for task in mergelist:
14421                                         if isinstance(task, list):
14422                                                 out.eerror(indent + str(tuple(task)))
14423                                 out.eerror("")
14424
14425                         if isinstance(e, depgraph.UnsatisfiedResumeDep):
14426                                 out.eerror("One or more packages are either masked or " + \
14427                                         "have missing dependencies:")
14428                                 out.eerror("")
14429                                 indent = "  "
14430                                 for dep in e.value:
14431                                         if dep.atom is None:
14432                                                 out.eerror(indent + "Masked package:")
14433                                                 out.eerror(2 * indent + str(dep.parent))
14434                                                 out.eerror("")
14435                                         else:
14436                                                 out.eerror(indent + str(dep.atom) + " pulled in by:")
14437                                                 out.eerror(2 * indent + str(dep.parent))
14438                                                 out.eerror("")
14439                                 msg = "The resume list contains packages " + \
14440                                         "that are either masked or have " + \
14441                                         "unsatisfied dependencies. " + \
14442                                         "Please restart/continue " + \
14443                                         "the operation manually, or use --skipfirst " + \
14444                                         "to skip the first package in the list and " + \
14445                                         "any other packages that may be " + \
14446                                         "masked or have missing dependencies."
14447                                 for line in wrap(msg, 72):
14448                                         out.eerror(line)
14449                         elif isinstance(e, portage.exception.PackageNotFound):
14450                                 out.eerror("An expected package is " + \
14451                                         "not available: %s" % str(e))
14452                                 out.eerror("")
14453                                 msg = "The resume list contains one or more " + \
14454                                         "packages that are no longer " + \
14455                                         "available. Please restart/continue " + \
14456                                         "the operation manually."
14457                                 for line in wrap(msg, 72):
14458                                         out.eerror(line)
14459                 else:
14460                         if show_spinner:
14461                                 print "\b\b... done!"
14462
14463                 if success:
14464                         if dropped_tasks:
14465                                 portage.writemsg("!!! One or more packages have been " + \
14466                                         "dropped due to\n" + \
14467                                         "!!! masking or unsatisfied dependencies:\n\n",
14468                                         noiselevel=-1)
14469                                 for task in dropped_tasks:
14470                                         portage.writemsg("  " + str(task) + "\n", noiselevel=-1)
14471                                 portage.writemsg("\n", noiselevel=-1)
14472                         del dropped_tasks
14473                 else:
14474                         if mydepgraph is not None:
14475                                 mydepgraph.display_problems()
14476                         if not (ask or pretend):
14477                                 # delete the current list and also the backup
14478                                 # since it's probably stale too.
14479                                 for k in ("resume", "resume_backup"):
14480                                         mtimedb.pop(k, None)
14481                                 mtimedb.commit()
14482
14483                         return 1
14484         else:
14485                 if ("--resume" in myopts):
14486                         print darkgreen("emerge: It seems we have nothing to resume...")
14487                         return os.EX_OK
14488
14489                 myparams = create_depgraph_params(myopts, myaction)
14490                 if "--quiet" not in myopts and "--nodeps" not in myopts:
14491                         print "Calculating dependencies  ",
14492                         sys.stdout.flush()
14493                 mydepgraph = depgraph(settings, trees, myopts, myparams, spinner)
14494                 try:
14495                         retval, favorites = mydepgraph.select_files(myfiles)
14496                 except portage.exception.PackageNotFound, e:
14497                         portage.writemsg("\n!!! %s\n" % str(e), noiselevel=-1)
14498                         return 1
14499                 except portage.exception.PackageSetNotFound, e:
14500                         root_config = trees[settings["ROOT"]]["root_config"]
14501                         display_missing_pkg_set(root_config, e.value)
14502                         return 1
14503                 if show_spinner:
14504                         print "\b\b... done!"
14505                 if not retval:
14506                         mydepgraph.display_problems()
14507                         return 1
14508
14509         if "--pretend" not in myopts and \
14510                 ("--ask" in myopts or "--tree" in myopts or \
14511                 "--verbose" in myopts) and \
14512                 not ("--quiet" in myopts and "--ask" not in myopts):
14513                 if "--resume" in myopts:
14514                         mymergelist = mydepgraph.altlist()
14515                         if len(mymergelist) == 0:
14516                                 print colorize("INFORM", "emerge: It seems we have nothing to resume...")
14517                                 return os.EX_OK
14518                         favorites = mtimedb["resume"]["favorites"]
14519                         retval = mydepgraph.display(
14520                                 mydepgraph.altlist(reversed=tree),
14521                                 favorites=favorites)
14522                         mydepgraph.display_problems()
14523                         if retval != os.EX_OK:
14524                                 return retval
14525                         prompt="Would you like to resume merging these packages?"
14526                 else:
14527                         retval = mydepgraph.display(
14528                                 mydepgraph.altlist(reversed=("--tree" in myopts)),
14529                                 favorites=favorites)
14530                         mydepgraph.display_problems()
14531                         if retval != os.EX_OK:
14532                                 return retval
14533                         mergecount=0
14534                         for x in mydepgraph.altlist():
14535                                 if isinstance(x, Package) and x.operation == "merge":
14536                                         mergecount += 1
14537
14538                         if mergecount==0:
14539                                 sets = trees[settings["ROOT"]]["root_config"].sets
14540                                 world_candidates = None
14541                                 if "--noreplace" in myopts and \
14542                                         not oneshot and favorites:
14543                                         # Sets that are not world candidates are filtered
14544                                         # out here since the favorites list needs to be
14545                                         # complete for depgraph.loadResumeCommand() to
14546                                         # operate correctly.
14547                                         world_candidates = [x for x in favorites \
14548                                                 if not (x.startswith(SETPREFIX) and \
14549                                                 not sets[x[1:]].world_candidate)]
14550                                 if "--noreplace" in myopts and \
14551                                         not oneshot and world_candidates:
14552                                         print
14553                                         for x in world_candidates:
14554                                                 print " %s %s" % (good("*"), x)
14555                                         prompt="Would you like to add these packages to your world favorites?"
14556                                 elif settings["AUTOCLEAN"] and "yes"==settings["AUTOCLEAN"]:
14557                                         prompt="Nothing to merge; would you like to auto-clean packages?"
14558                                 else:
14559                                         print
14560                                         print "Nothing to merge; quitting."
14561                                         print
14562                                         return os.EX_OK
14563                         elif "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
14564                                 prompt="Would you like to fetch the source files for these packages?"
14565                         else:
14566                                 prompt="Would you like to merge these packages?"
14567                 print
14568                 if "--ask" in myopts and userquery(prompt) == "No":
14569                         print
14570                         print "Quitting."
14571                         print
14572                         return os.EX_OK
14573                 # Don't ask again (e.g. when auto-cleaning packages after merge)
14574                 myopts.pop("--ask", None)
14575
14576         if ("--pretend" in myopts) and not ("--fetchonly" in myopts or "--fetch-all-uri" in myopts):
14577                 if ("--resume" in myopts):
14578                         mymergelist = mydepgraph.altlist()
14579                         if len(mymergelist) == 0:
14580                                 print colorize("INFORM", "emerge: It seems we have nothing to resume...")
14581                                 return os.EX_OK
14582                         favorites = mtimedb["resume"]["favorites"]
14583                         retval = mydepgraph.display(
14584                                 mydepgraph.altlist(reversed=tree),
14585                                 favorites=favorites)
14586                         mydepgraph.display_problems()
14587                         if retval != os.EX_OK:
14588                                 return retval
14589                 else:
14590                         retval = mydepgraph.display(
14591                                 mydepgraph.altlist(reversed=("--tree" in myopts)),
14592                                 favorites=favorites)
14593                         mydepgraph.display_problems()
14594                         if retval != os.EX_OK:
14595                                 return retval
14596                         if "--buildpkgonly" in myopts:
14597                                 graph_copy = mydepgraph.digraph.clone()
14598                                 removed_nodes = set()
14599                                 for node in graph_copy:
14600                                         if not isinstance(node, Package) or \
14601                                                 node.operation == "nomerge":
14602                                                 removed_nodes.add(node)
14603                                 graph_copy.difference_update(removed_nodes)
14604                                 if not graph_copy.hasallzeros(ignore_priority = \
14605                                         DepPrioritySatisfiedRange.ignore_medium):
14606                                         print "\n!!! --buildpkgonly requires all dependencies to be merged."
14607                                         print "!!! You have to merge the dependencies before you can build this package.\n"
14608                                         return 1
14609         else:
14610                 if "--buildpkgonly" in myopts:
14611                         graph_copy = mydepgraph.digraph.clone()
14612                         removed_nodes = set()
14613                         for node in graph_copy:
14614                                 if not isinstance(node, Package) or \
14615                                         node.operation == "nomerge":
14616                                         removed_nodes.add(node)
14617                         graph_copy.difference_update(removed_nodes)
14618                         if not graph_copy.hasallzeros(ignore_priority = \
14619                                 DepPrioritySatisfiedRange.ignore_medium):
14620                                 print "\n!!! --buildpkgonly requires all dependencies to be merged."
14621                                 print "!!! Cannot merge requested packages. Merge deps and try again.\n"
14622                                 return 1
14623
14624                 if ("--resume" in myopts):
14625                         favorites=mtimedb["resume"]["favorites"]
14626                         mymergelist = mydepgraph.altlist()
14627                         mydepgraph.break_refs(mymergelist)
14628                         mergetask = Scheduler(settings, trees, mtimedb, myopts,
14629                                 spinner, mymergelist, favorites, mydepgraph.schedulerGraph())
14630                         del mydepgraph, mymergelist
14631                         clear_caches(trees)
14632
14633                         retval = mergetask.merge()
14634                         merge_count = mergetask.curval
14635                 else:
14636                         if "resume" in mtimedb and \
14637                         "mergelist" in mtimedb["resume"] and \
14638                         len(mtimedb["resume"]["mergelist"]) > 1:
14639                                 mtimedb["resume_backup"] = mtimedb["resume"]
14640                                 del mtimedb["resume"]
14641                                 mtimedb.commit()
14642                         mtimedb["resume"]={}
14643                         # Stored as a dict starting with portage-2.1.6_rc1, and supported
14644                         # by >=portage-2.1.3_rc8. Versions <portage-2.1.3_rc8 only support
14645                         # a list type for options.
14646                         mtimedb["resume"]["myopts"] = myopts.copy()
14647
14648                         # Convert Atom instances to plain str.
14649                         mtimedb["resume"]["favorites"] = [str(x) for x in favorites]
14650
14651                         pkglist = mydepgraph.altlist()
14652                         mydepgraph.saveNomergeFavorites()
14653                         mydepgraph.break_refs(pkglist)
14654                         mergetask = Scheduler(settings, trees, mtimedb, myopts,
14655                                 spinner, pkglist, favorites, mydepgraph.schedulerGraph())
14656                         del mydepgraph, pkglist
14657                         clear_caches(trees)
14658
14659                         retval = mergetask.merge()
14660                         merge_count = mergetask.curval
14661
14662                 if retval == os.EX_OK and not (buildpkgonly or fetchonly or pretend):
14663                         if "yes" == settings.get("AUTOCLEAN"):
14664                                 portage.writemsg_stdout(">>> Auto-cleaning packages...\n")
14665                                 unmerge(trees[settings["ROOT"]]["root_config"],
14666                                         myopts, "clean", [],
14667                                         ldpath_mtimes, autoclean=1)
14668                         else:
14669                                 portage.writemsg_stdout(colorize("WARN", "WARNING:")
14670                                         + " AUTOCLEAN is disabled.  This can cause serious"
14671                                         + " problems due to overlapping packages.\n")
14672                         trees[settings["ROOT"]]["vartree"].dbapi.plib_registry.pruneNonExisting()
14673
14674                 return retval
14675
14676 def multiple_actions(action1, action2):
14677         sys.stderr.write("\n!!! Multiple actions requested... Please choose one only.\n")
14678         sys.stderr.write("!!! '%s' or '%s'\n\n" % (action1, action2))
14679         sys.exit(1)
14680
14681 def insert_optional_args(args):
14682         """
14683         Parse optional arguments and insert a value if one has
14684         not been provided. This is done before feeding the args
14685         to the optparse parser since that parser does not support
14686         this feature natively.
14687         """
14688
14689         new_args = []
14690         jobs_opts = ("-j", "--jobs")
14691         arg_stack = args[:]
14692         arg_stack.reverse()
14693         while arg_stack:
14694                 arg = arg_stack.pop()
14695
14696                 short_job_opt = bool("j" in arg and arg[:1] == "-" and arg[:2] != "--")
14697                 if not (short_job_opt or arg in jobs_opts):
14698                         new_args.append(arg)
14699                         continue
14700
14701                 # Insert an empty placeholder in order to
14702                 # satisfy the requirements of optparse.
14703
14704                 new_args.append("--jobs")
14705                 job_count = None
14706                 saved_opts = None
14707                 if short_job_opt and len(arg) > 2:
14708                         if arg[:2] == "-j":
14709                                 try:
14710                                         job_count = int(arg[2:])
14711                                 except ValueError:
14712                                         saved_opts = arg[2:]
14713                         else:
14714                                 job_count = "True"
14715                                 saved_opts = arg[1:].replace("j", "")
14716
14717                 if job_count is None and arg_stack:
14718                         try:
14719                                 job_count = int(arg_stack[-1])
14720                         except ValueError:
14721                                 pass
14722                         else:
14723                                 # Discard the job count from the stack
14724                                 # since we're consuming it here.
14725                                 arg_stack.pop()
14726
14727                 if job_count is None:
14728                         # unlimited number of jobs
14729                         new_args.append("True")
14730                 else:
14731                         new_args.append(str(job_count))
14732
14733                 if saved_opts is not None:
14734                         new_args.append("-" + saved_opts)
14735
14736         return new_args
14737
14738 def parse_opts(tmpcmdline, silent=False):
14739         myaction=None
14740         myopts = {}
14741         myfiles=[]
14742
14743         global actions, options, shortmapping
14744
14745         longopt_aliases = {"--cols":"--columns", "--skip-first":"--skipfirst"}
14746         argument_options = {
14747                 "--config-root": {
14748                         "help":"specify the location for portage configuration files",
14749                         "action":"store"
14750                 },
14751                 "--color": {
14752                         "help":"enable or disable color output",
14753                         "type":"choice",
14754                         "choices":("y", "n")
14755                 },
14756
14757                 "--jobs": {
14758
14759                         "help"   : "Specifies the number of packages to build " + \
14760                                 "simultaneously.",
14761
14762                         "action" : "store"
14763                 },
14764
14765                 "--load-average": {
14766
14767                         "help"   :"Specifies that no new builds should be started " + \
14768                                 "if there are other builds running and the load average " + \
14769                                 "is at least LOAD (a floating-point number).",
14770
14771                         "action" : "store"
14772                 },
14773
14774                 "--with-bdeps": {
14775                         "help":"include unnecessary build time dependencies",
14776                         "type":"choice",
14777                         "choices":("y", "n")
14778                 },
14779                 "--reinstall": {
14780                         "help":"specify conditions to trigger package reinstallation",
14781                         "type":"choice",
14782                         "choices":["changed-use"]
14783                 }
14784         }
14785
14786         from optparse import OptionParser
14787         parser = OptionParser()
14788         if parser.has_option("--help"):
14789                 parser.remove_option("--help")
14790
14791         for action_opt in actions:
14792                 parser.add_option("--" + action_opt, action="store_true",
14793                         dest=action_opt.replace("-", "_"), default=False)
14794         for myopt in options:
14795                 parser.add_option(myopt, action="store_true",
14796                         dest=myopt.lstrip("--").replace("-", "_"), default=False)
14797         for shortopt, longopt in shortmapping.iteritems():
14798                 parser.add_option("-" + shortopt, action="store_true",
14799                         dest=longopt.lstrip("--").replace("-", "_"), default=False)
14800         for myalias, myopt in longopt_aliases.iteritems():
14801                 parser.add_option(myalias, action="store_true",
14802                         dest=myopt.lstrip("--").replace("-", "_"), default=False)
14803
14804         for myopt, kwargs in argument_options.iteritems():
14805                 parser.add_option(myopt,
14806                         dest=myopt.lstrip("--").replace("-", "_"), **kwargs)
14807
14808         tmpcmdline = insert_optional_args(tmpcmdline)
14809
14810         myoptions, myargs = parser.parse_args(args=tmpcmdline)
14811
14812         if myoptions.jobs:
14813                 jobs = None
14814                 if myoptions.jobs == "True":
14815                         jobs = True
14816                 else:
14817                         try:
14818                                 jobs = int(myoptions.jobs)
14819                         except ValueError:
14820                                 jobs = -1
14821
14822                 if jobs is not True and \
14823                         jobs < 1:
14824                         jobs = None
14825                         if not silent:
14826                                 writemsg("!!! Invalid --jobs parameter: '%s'\n" % \
14827                                         (myoptions.jobs,), noiselevel=-1)
14828
14829                 myoptions.jobs = jobs
14830
14831         if myoptions.load_average:
14832                 try:
14833                         load_average = float(myoptions.load_average)
14834                 except ValueError:
14835                         load_average = 0.0
14836
14837                 if load_average <= 0.0:
14838                         load_average = None
14839                         if not silent:
14840                                 writemsg("!!! Invalid --load-average parameter: '%s'\n" % \
14841                                         (myoptions.load_average,), noiselevel=-1)
14842
14843                 myoptions.load_average = load_average
14844
14845         for myopt in options:
14846                 v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"))
14847                 if v:
14848                         myopts[myopt] = True
14849
14850         for myopt in argument_options:
14851                 v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"), None)
14852                 if v is not None:
14853                         myopts[myopt] = v
14854
14855         if myoptions.searchdesc:
14856                 myoptions.search = True
14857
14858         for action_opt in actions:
14859                 v = getattr(myoptions, action_opt.replace("-", "_"))
14860                 if v:
14861                         if myaction:
14862                                 multiple_actions(myaction, action_opt)
14863                                 sys.exit(1)
14864                         myaction = action_opt
14865
14866         myfiles += myargs
14867
14868         return myaction, myopts, myfiles
14869
14870 def validate_ebuild_environment(trees):
14871         for myroot in trees:
14872                 settings = trees[myroot]["vartree"].settings
14873                 settings.validate()
14874
14875 def clear_caches(trees):
14876         for d in trees.itervalues():
14877                 d["porttree"].dbapi.melt()
14878                 d["porttree"].dbapi._aux_cache.clear()
14879                 d["bintree"].dbapi._aux_cache.clear()
14880                 d["bintree"].dbapi._clear_cache()
14881                 d["vartree"].dbapi.linkmap._clear_cache()
14882         portage.dircache.clear()
14883         gc.collect()
14884
14885 def load_emerge_config(trees=None):
14886         kwargs = {}
14887         for k, envvar in (("config_root", "PORTAGE_CONFIGROOT"), ("target_root", "ROOT")):
14888                 v = os.environ.get(envvar, None)
14889                 if v and v.strip():
14890                         kwargs[k] = v
14891         trees = portage.create_trees(trees=trees, **kwargs)
14892
14893         for root, root_trees in trees.iteritems():
14894                 settings = root_trees["vartree"].settings
14895                 setconfig = load_default_config(settings, root_trees)
14896                 root_trees["root_config"] = RootConfig(settings, root_trees, setconfig)
14897
14898         settings = trees["/"]["vartree"].settings
14899
14900         for myroot in trees:
14901                 if myroot != "/":
14902                         settings = trees[myroot]["vartree"].settings
14903                         break
14904
14905         mtimedbfile = os.path.join("/", portage.CACHE_PATH.lstrip(os.path.sep), "mtimedb")
14906         mtimedb = portage.MtimeDB(mtimedbfile)
14907         
14908         return settings, trees, mtimedb
14909
14910 def adjust_config(myopts, settings):
14911         """Make emerge specific adjustments to the config."""
14912
14913         # To enhance usability, make some vars case insensitive by forcing them to
14914         # lower case.
14915         for myvar in ("AUTOCLEAN", "NOCOLOR"):
14916                 if myvar in settings:
14917                         settings[myvar] = settings[myvar].lower()
14918                         settings.backup_changes(myvar)
14919         del myvar
14920
14921         # Kill noauto as it will break merges otherwise.
14922         if "noauto" in settings.features:
14923                 settings.features.remove('noauto')
14924                 settings['FEATURES'] = ' '.join(sorted(settings.features))
14925                 settings.backup_changes("FEATURES")
14926
14927         CLEAN_DELAY = 5
14928         try:
14929                 CLEAN_DELAY = int(settings.get("CLEAN_DELAY", str(CLEAN_DELAY)))
14930         except ValueError, e:
14931                 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
14932                 portage.writemsg("!!! Unable to parse integer: CLEAN_DELAY='%s'\n" % \
14933                         settings["CLEAN_DELAY"], noiselevel=-1)
14934         settings["CLEAN_DELAY"] = str(CLEAN_DELAY)
14935         settings.backup_changes("CLEAN_DELAY")
14936
14937         EMERGE_WARNING_DELAY = 10
14938         try:
14939                 EMERGE_WARNING_DELAY = int(settings.get(
14940                         "EMERGE_WARNING_DELAY", str(EMERGE_WARNING_DELAY)))
14941         except ValueError, e:
14942                 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
14943                 portage.writemsg("!!! Unable to parse integer: EMERGE_WARNING_DELAY='%s'\n" % \
14944                         settings["EMERGE_WARNING_DELAY"], noiselevel=-1)
14945         settings["EMERGE_WARNING_DELAY"] = str(EMERGE_WARNING_DELAY)
14946         settings.backup_changes("EMERGE_WARNING_DELAY")
14947
14948         if "--quiet" in myopts:
14949                 settings["PORTAGE_QUIET"]="1"
14950                 settings.backup_changes("PORTAGE_QUIET")
14951
14952         if "--verbose" in myopts:
14953                 settings["PORTAGE_VERBOSE"] = "1"
14954                 settings.backup_changes("PORTAGE_VERBOSE")
14955
14956         # Set so that configs will be merged regardless of remembered status
14957         if ("--noconfmem" in myopts):
14958                 settings["NOCONFMEM"]="1"
14959                 settings.backup_changes("NOCONFMEM")
14960
14961         # Set various debug markers... They should be merged somehow.
14962         PORTAGE_DEBUG = 0
14963         try:
14964                 PORTAGE_DEBUG = int(settings.get("PORTAGE_DEBUG", str(PORTAGE_DEBUG)))
14965                 if PORTAGE_DEBUG not in (0, 1):
14966                         portage.writemsg("!!! Invalid value: PORTAGE_DEBUG='%i'\n" % \
14967                                 PORTAGE_DEBUG, noiselevel=-1)
14968                         portage.writemsg("!!! PORTAGE_DEBUG must be either 0 or 1\n",
14969                                 noiselevel=-1)
14970                         PORTAGE_DEBUG = 0
14971         except ValueError, e:
14972                 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
14973                 portage.writemsg("!!! Unable to parse integer: PORTAGE_DEBUG='%s'\n" %\
14974                         settings["PORTAGE_DEBUG"], noiselevel=-1)
14975                 del e
14976         if "--debug" in myopts:
14977                 PORTAGE_DEBUG = 1
14978         settings["PORTAGE_DEBUG"] = str(PORTAGE_DEBUG)
14979         settings.backup_changes("PORTAGE_DEBUG")
14980
14981         if settings.get("NOCOLOR") not in ("yes","true"):
14982                 portage.output.havecolor = 1
14983
14984         """The explicit --color < y | n > option overrides the NOCOLOR environment
14985         variable and stdout auto-detection."""
14986         if "--color" in myopts:
14987                 if "y" == myopts["--color"]:
14988                         portage.output.havecolor = 1
14989                         settings["NOCOLOR"] = "false"
14990                 else:
14991                         portage.output.havecolor = 0
14992                         settings["NOCOLOR"] = "true"
14993                 settings.backup_changes("NOCOLOR")
14994         elif not sys.stdout.isatty() and settings.get("NOCOLOR") != "no":
14995                 portage.output.havecolor = 0
14996                 settings["NOCOLOR"] = "true"
14997                 settings.backup_changes("NOCOLOR")
14998
14999 def apply_priorities(settings):
15000         ionice(settings)
15001         nice(settings)
15002
15003 def nice(settings):
15004         try:
15005                 os.nice(int(settings.get("PORTAGE_NICENESS", "0")))
15006         except (OSError, ValueError), e:
15007                 out = portage.output.EOutput()
15008                 out.eerror("Failed to change nice value to '%s'" % \
15009                         settings["PORTAGE_NICENESS"])
15010                 out.eerror("%s\n" % str(e))
15011
15012 def ionice(settings):
15013
15014         ionice_cmd = settings.get("PORTAGE_IONICE_COMMAND")
15015         if ionice_cmd:
15016                 ionice_cmd = shlex.split(ionice_cmd)
15017         if not ionice_cmd:
15018                 return
15019
15020         from portage.util import varexpand
15021         variables = {"PID" : str(os.getpid())}
15022         cmd = [varexpand(x, mydict=variables) for x in ionice_cmd]
15023
15024         try:
15025                 rval = portage.process.spawn(cmd, env=os.environ)
15026         except portage.exception.CommandNotFound:
15027                 # The OS kernel probably doesn't support ionice,
15028                 # so return silently.
15029                 return
15030
15031         if rval != os.EX_OK:
15032                 out = portage.output.EOutput()
15033                 out.eerror("PORTAGE_IONICE_COMMAND returned %d" % (rval,))
15034                 out.eerror("See the make.conf(5) man page for PORTAGE_IONICE_COMMAND usage instructions.")
15035
15036 def display_missing_pkg_set(root_config, set_name):
15037
15038         msg = []
15039         msg.append(("emerge: There are no sets to satisfy '%s'. " + \
15040                 "The following sets exist:") % \
15041                 colorize("INFORM", set_name))
15042         msg.append("")
15043
15044         for s in sorted(root_config.sets):
15045                 msg.append("    %s" % s)
15046         msg.append("")
15047
15048         writemsg_level("".join("%s\n" % l for l in msg),
15049                 level=logging.ERROR, noiselevel=-1)
15050
15051 def expand_set_arguments(myfiles, myaction, root_config):
15052         retval = os.EX_OK
15053         setconfig = root_config.setconfig
15054
15055         sets = setconfig.getSets()
15056
15057         # In order to know exactly which atoms/sets should be added to the
15058         # world file, the depgraph performs set expansion later. It will get
15059         # confused about where the atoms came from if it's not allowed to
15060         # expand them itself.
15061         do_not_expand = (None, )
15062         newargs = []
15063         for a in myfiles:
15064                 if a in ("system", "world"):
15065                         newargs.append(SETPREFIX+a)
15066                 else:
15067                         newargs.append(a)
15068         myfiles = newargs
15069         del newargs
15070         newargs = []
15071
15072         # separators for set arguments
15073         ARG_START = "{"
15074         ARG_END = "}"
15075
15076         # WARNING: all operators must be of equal length
15077         IS_OPERATOR = "/@"
15078         DIFF_OPERATOR = "-@"
15079         UNION_OPERATOR = "+@"
15080         
15081         for i in range(0, len(myfiles)):
15082                 if myfiles[i].startswith(SETPREFIX):
15083                         start = 0
15084                         end = 0
15085                         x = myfiles[i][len(SETPREFIX):]
15086                         newset = ""
15087                         while x:
15088                                 start = x.find(ARG_START)
15089                                 end = x.find(ARG_END)
15090                                 if start > 0 and start < end:
15091                                         namepart = x[:start]
15092                                         argpart = x[start+1:end]
15093                                 
15094                                         # TODO: implement proper quoting
15095                                         args = argpart.split(",")
15096                                         options = {}
15097                                         for a in args:
15098                                                 if "=" in a:
15099                                                         k, v  = a.split("=", 1)
15100                                                         options[k] = v
15101                                                 else:
15102                                                         options[a] = "True"
15103                                         setconfig.update(namepart, options)
15104                                         newset += (x[:start-len(namepart)]+namepart)
15105                                         x = x[end+len(ARG_END):]
15106                                 else:
15107                                         newset += x
15108                                         x = ""
15109                         myfiles[i] = SETPREFIX+newset
15110                                 
15111         sets = setconfig.getSets()
15112
15113         # display errors that occured while loading the SetConfig instance
15114         for e in setconfig.errors:
15115                 print colorize("BAD", "Error during set creation: %s" % e)
15116         
15117         # emerge relies on the existance of sets with names "world" and "system"
15118         required_sets = ("world", "system")
15119         missing_sets = []
15120
15121         for s in required_sets:
15122                 if s not in sets:
15123                         missing_sets.append(s)
15124         if missing_sets:
15125                 if len(missing_sets) > 2:
15126                         missing_sets_str = ", ".join('"%s"' % s for s in missing_sets[:-1])
15127                         missing_sets_str += ', and "%s"' % missing_sets[-1]
15128                 elif len(missing_sets) == 2:
15129                         missing_sets_str = '"%s" and "%s"' % tuple(missing_sets)
15130                 else:
15131                         missing_sets_str = '"%s"' % missing_sets[-1]
15132                 msg = ["emerge: incomplete set configuration, " + \
15133                         "missing set(s): %s" % missing_sets_str]
15134                 if sets:
15135                         msg.append("        sets defined: %s" % ", ".join(sets))
15136                 msg.append("        This usually means that '%s'" % \
15137                         (os.path.join(portage.const.GLOBAL_CONFIG_PATH, "sets.conf"),))
15138                 msg.append("        is missing or corrupt.")
15139                 for line in msg:
15140                         writemsg_level(line + "\n", level=logging.ERROR, noiselevel=-1)
15141                 return (None, 1)
15142         unmerge_actions = ("unmerge", "prune", "clean", "depclean")
15143
15144         for a in myfiles:
15145                 if a.startswith(SETPREFIX):
15146                         # support simple set operations (intersection, difference and union)
15147                         # on the commandline. Expressions are evaluated strictly left-to-right
15148                         if IS_OPERATOR in a or DIFF_OPERATOR in a or UNION_OPERATOR in a:
15149                                 expression = a[len(SETPREFIX):]
15150                                 expr_sets = []
15151                                 expr_ops = []
15152                                 while IS_OPERATOR in expression or DIFF_OPERATOR in expression or UNION_OPERATOR in expression:
15153                                         is_pos = expression.rfind(IS_OPERATOR)
15154                                         diff_pos = expression.rfind(DIFF_OPERATOR)
15155                                         union_pos = expression.rfind(UNION_OPERATOR)
15156                                         op_pos = max(is_pos, diff_pos, union_pos)
15157                                         s1 = expression[:op_pos]
15158                                         s2 = expression[op_pos+len(IS_OPERATOR):]
15159                                         op = expression[op_pos:op_pos+len(IS_OPERATOR)]
15160                                         if not s2 in sets:
15161                                                 display_missing_pkg_set(root_config, s2)
15162                                                 return (None, 1)
15163                                         expr_sets.insert(0, s2)
15164                                         expr_ops.insert(0, op)
15165                                         expression = s1
15166                                 if not expression in sets:
15167                                         display_missing_pkg_set(root_config, expression)
15168                                         return (None, 1)
15169                                 expr_sets.insert(0, expression)
15170                                 result = set(setconfig.getSetAtoms(expression))
15171                                 for i in range(0, len(expr_ops)):
15172                                         s2 = setconfig.getSetAtoms(expr_sets[i+1])
15173                                         if expr_ops[i] == IS_OPERATOR:
15174                                                 result.intersection_update(s2)
15175                                         elif expr_ops[i] == DIFF_OPERATOR:
15176                                                 result.difference_update(s2)
15177                                         elif expr_ops[i] == UNION_OPERATOR:
15178                                                 result.update(s2)
15179                                         else:
15180                                                 raise NotImplementedError("unknown set operator %s" % expr_ops[i])
15181                                 newargs.extend(result)
15182                         else:                   
15183                                 s = a[len(SETPREFIX):]
15184                                 if s not in sets:
15185                                         display_missing_pkg_set(root_config, s)
15186                                         return (None, 1)
15187                                 setconfig.active.append(s)
15188                                 try:
15189                                         set_atoms = setconfig.getSetAtoms(s)
15190                                 except portage.exception.PackageSetNotFound, e:
15191                                         writemsg_level(("emerge: the given set '%s' " + \
15192                                                 "contains a non-existent set named '%s'.\n") % \
15193                                                 (s, e), level=logging.ERROR, noiselevel=-1)
15194                                         return (None, 1)
15195                                 if myaction in unmerge_actions and \
15196                                                 not sets[s].supportsOperation("unmerge"):
15197                                         sys.stderr.write("emerge: the given set '%s' does " % s + \
15198                                                 "not support unmerge operations\n")
15199                                         retval = 1
15200                                 elif not set_atoms:
15201                                         print "emerge: '%s' is an empty set" % s
15202                                 elif myaction not in do_not_expand:
15203                                         newargs.extend(set_atoms)
15204                                 else:
15205                                         newargs.append(SETPREFIX+s)
15206                                 for e in sets[s].errors:
15207                                         print e
15208                 else:
15209                         newargs.append(a)
15210         return (newargs, retval)
15211
15212 def repo_name_check(trees):
15213         missing_repo_names = set()
15214         for root, root_trees in trees.iteritems():
15215                 if "porttree" in root_trees:
15216                         portdb = root_trees["porttree"].dbapi
15217                         missing_repo_names.update(portdb.porttrees)
15218                         repos = portdb.getRepositories()
15219                         for r in repos:
15220                                 missing_repo_names.discard(portdb.getRepositoryPath(r))
15221                         if portdb.porttree_root in missing_repo_names and \
15222                                 not os.path.exists(os.path.join(
15223                                 portdb.porttree_root, "profiles")):
15224                                 # This is normal if $PORTDIR happens to be empty,
15225                                 # so don't warn about it.
15226                                 missing_repo_names.remove(portdb.porttree_root)
15227
15228         if missing_repo_names:
15229                 msg = []
15230                 msg.append("WARNING: One or more repositories " + \
15231                         "have missing repo_name entries:")
15232                 msg.append("")
15233                 for p in missing_repo_names:
15234                         msg.append("\t%s/profiles/repo_name" % (p,))
15235                 msg.append("")
15236                 msg.extend(textwrap.wrap("NOTE: Each repo_name entry " + \
15237                         "should be a plain text file containing a unique " + \
15238                         "name for the repository on the first line.", 70))
15239                 writemsg_level("".join("%s\n" % l for l in msg),
15240                         level=logging.WARNING, noiselevel=-1)
15241
15242         return bool(missing_repo_names)
15243
15244 def config_protect_check(trees):
15245         for root, root_trees in trees.iteritems():
15246                 if not root_trees["root_config"].settings.get("CONFIG_PROTECT"):
15247                         msg = "!!! CONFIG_PROTECT is empty"
15248                         if root != "/":
15249                                 msg += " for '%s'" % root
15250                         writemsg_level(msg, level=logging.WARN, noiselevel=-1)
15251
15252 def ambiguous_package_name(arg, atoms, root_config, spinner, myopts):
15253
15254         if "--quiet" in myopts:
15255                 print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
15256                 print "!!! one of the following fully-qualified ebuild names instead:\n"
15257                 for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
15258                         print "    " + colorize("INFORM", cp)
15259                 return
15260
15261         s = search(root_config, spinner, "--searchdesc" in myopts,
15262                 "--quiet" not in myopts, "--usepkg" in myopts,
15263                 "--usepkgonly" in myopts)
15264         null_cp = portage.dep_getkey(insert_category_into_atom(
15265                 arg, "null"))
15266         cat, atom_pn = portage.catsplit(null_cp)
15267         s.searchkey = atom_pn
15268         for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
15269                 s.addCP(cp)
15270         s.output()
15271         print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
15272         print "!!! one of the above fully-qualified ebuild names instead.\n"
15273
15274 def profile_check(trees, myaction, myopts):
15275         if myaction in ("info", "sync"):
15276                 return os.EX_OK
15277         elif "--version" in myopts or "--help" in myopts:
15278                 return os.EX_OK
15279         for root, root_trees in trees.iteritems():
15280                 if root_trees["root_config"].settings.profiles:
15281                         continue
15282                 # generate some profile related warning messages
15283                 validate_ebuild_environment(trees)
15284                 msg = "If you have just changed your profile configuration, you " + \
15285                         "should revert back to the previous configuration. Due to " + \
15286                         "your current profile being invalid, allowed actions are " + \
15287                         "limited to --help, --info, --sync, and --version."
15288                 writemsg_level("".join("!!! %s\n" % l for l in textwrap.wrap(msg, 70)),
15289                         level=logging.ERROR, noiselevel=-1)
15290                 return 1
15291         return os.EX_OK
15292
15293 def emerge_main():
15294         global portage  # NFC why this is necessary now - genone
15295         portage._disable_legacy_globals()
15296         # Disable color until we're sure that it should be enabled (after
15297         # EMERGE_DEFAULT_OPTS has been parsed).
15298         portage.output.havecolor = 0
15299         # This first pass is just for options that need to be known as early as
15300         # possible, such as --config-root.  They will be parsed again later,
15301         # together with EMERGE_DEFAULT_OPTS (which may vary depending on the
15302         # the value of --config-root).
15303         myaction, myopts, myfiles = parse_opts(sys.argv[1:], silent=True)
15304         if "--debug" in myopts:
15305                 os.environ["PORTAGE_DEBUG"] = "1"
15306         if "--config-root" in myopts:
15307                 os.environ["PORTAGE_CONFIGROOT"] = myopts["--config-root"]
15308
15309         # Portage needs to ensure a sane umask for the files it creates.
15310         os.umask(022)
15311         settings, trees, mtimedb = load_emerge_config()
15312         portdb = trees[settings["ROOT"]]["porttree"].dbapi
15313         rval = profile_check(trees, myaction, myopts)
15314         if rval != os.EX_OK:
15315                 return rval
15316
15317         if portage._global_updates(trees, mtimedb["updates"]):
15318                 mtimedb.commit()
15319                 # Reload the whole config from scratch.
15320                 settings, trees, mtimedb = load_emerge_config(trees=trees)
15321                 portdb = trees[settings["ROOT"]]["porttree"].dbapi
15322
15323         xterm_titles = "notitles" not in settings.features
15324
15325         tmpcmdline = []
15326         if "--ignore-default-opts" not in myopts:
15327                 tmpcmdline.extend(settings["EMERGE_DEFAULT_OPTS"].split())
15328         tmpcmdline.extend(sys.argv[1:])
15329         myaction, myopts, myfiles = parse_opts(tmpcmdline)
15330
15331         if "--digest" in myopts:
15332                 os.environ["FEATURES"] = os.environ.get("FEATURES","") + " digest"
15333                 # Reload the whole config from scratch so that the portdbapi internal
15334                 # config is updated with new FEATURES.
15335                 settings, trees, mtimedb = load_emerge_config(trees=trees)
15336                 portdb = trees[settings["ROOT"]]["porttree"].dbapi
15337
15338         for myroot in trees:
15339                 mysettings =  trees[myroot]["vartree"].settings
15340                 mysettings.unlock()
15341                 adjust_config(myopts, mysettings)
15342                 if '--pretend' not in myopts and myaction in \
15343                         (None, 'clean', 'depclean', 'prune', 'unmerge'):
15344                         mysettings["PORTAGE_COUNTER_HASH"] = \
15345                                 trees[myroot]["vartree"].dbapi._counter_hash()
15346                         mysettings.backup_changes("PORTAGE_COUNTER_HASH")
15347                 mysettings.lock()
15348                 del myroot, mysettings
15349
15350         apply_priorities(settings)
15351
15352         spinner = stdout_spinner()
15353         if "candy" in settings.features:
15354                 spinner.update = spinner.update_scroll
15355
15356         if "--quiet" not in myopts:
15357                 portage.deprecated_profile_check(settings=settings)
15358                 repo_name_check(trees)
15359                 config_protect_check(trees)
15360
15361         eclasses_overridden = {}
15362         for mytrees in trees.itervalues():
15363                 mydb = mytrees["porttree"].dbapi
15364                 # Freeze the portdbapi for performance (memoize all xmatch results).
15365                 mydb.freeze()
15366                 eclasses_overridden.update(mydb.eclassdb._master_eclasses_overridden)
15367         del mytrees, mydb
15368
15369         if eclasses_overridden and \
15370                 settings.get("PORTAGE_ECLASS_WARNING_ENABLE") != "0":
15371                 prefix = bad(" * ")
15372                 if len(eclasses_overridden) == 1:
15373                         writemsg(prefix + "Overlay eclass overrides " + \
15374                                 "eclass from PORTDIR:\n", noiselevel=-1)
15375                 else:
15376                         writemsg(prefix + "Overlay eclasses override " + \
15377                                 "eclasses from PORTDIR:\n", noiselevel=-1)
15378                 writemsg(prefix + "\n", noiselevel=-1)
15379                 for eclass_name in sorted(eclasses_overridden):
15380                         writemsg(prefix + "  '%s/%s.eclass'\n" % \
15381                                 (eclasses_overridden[eclass_name], eclass_name),
15382                                 noiselevel=-1)
15383                 writemsg(prefix + "\n", noiselevel=-1)
15384                 msg = "It is best to avoid overriding eclasses from PORTDIR " + \
15385                 "because it will trigger invalidation of cached ebuild metadata " + \
15386                 "that is distributed with the portage tree. If you must " + \
15387                 "override eclasses from PORTDIR then you are advised to add " + \
15388                 "FEATURES=\"metadata-transfer\" to /etc/make.conf and to run " + \
15389                 "`emerge --regen` after each time that you run `emerge --sync`. " + \
15390                 "Set PORTAGE_ECLASS_WARNING_ENABLE=\"0\" in /etc/make.conf if " + \
15391                 "you would like to disable this warning."
15392                 from textwrap import wrap
15393                 for line in wrap(msg, 72):
15394                         writemsg("%s%s\n" % (prefix, line), noiselevel=-1)
15395
15396         if "moo" in myfiles:
15397                 print """
15398
15399   Larry loves Gentoo (""" + platform.system() + """)
15400
15401  _______________________
15402 < Have you mooed today? >
15403  -----------------------
15404         \   ^__^
15405          \  (oo)\_______
15406             (__)\       )\/\ 
15407                 ||----w |
15408                 ||     ||
15409
15410 """
15411
15412         for x in myfiles:
15413                 ext = os.path.splitext(x)[1]
15414                 if (ext == ".ebuild" or ext == ".tbz2") and os.path.exists(os.path.abspath(x)):
15415                         print colorize("BAD", "\n*** emerging by path is broken and may not always work!!!\n")
15416                         break
15417
15418         root_config = trees[settings["ROOT"]]["root_config"]
15419         if myaction == "list-sets":
15420                 sys.stdout.write("".join("%s\n" % s for s in sorted(root_config.sets)))
15421                 sys.stdout.flush()
15422                 return os.EX_OK
15423
15424         # only expand sets for actions taking package arguments
15425         oldargs = myfiles[:]
15426         if myaction in ("clean", "config", "depclean", "info", "prune", "unmerge", None):
15427                 myfiles, retval = expand_set_arguments(myfiles, myaction, root_config)
15428                 if retval != os.EX_OK:
15429                         return retval
15430
15431                 # Need to handle empty sets specially, otherwise emerge will react 
15432                 # with the help message for empty argument lists
15433                 if oldargs and not myfiles:
15434                         print "emerge: no targets left after set expansion"
15435                         return 0
15436
15437         if ("--tree" in myopts) and ("--columns" in myopts):
15438                 print "emerge: can't specify both of \"--tree\" and \"--columns\"."
15439                 return 1
15440
15441         if ("--quiet" in myopts):
15442                 spinner.update = spinner.update_quiet
15443                 portage.util.noiselimit = -1
15444
15445         # Always create packages if FEATURES=buildpkg
15446         # Imply --buildpkg if --buildpkgonly
15447         if ("buildpkg" in settings.features) or ("--buildpkgonly" in myopts):
15448                 if "--buildpkg" not in myopts:
15449                         myopts["--buildpkg"] = True
15450
15451         # Always try and fetch binary packages if FEATURES=getbinpkg
15452         if ("getbinpkg" in settings.features):
15453                 myopts["--getbinpkg"] = True
15454
15455         if "--buildpkgonly" in myopts:
15456                 # --buildpkgonly will not merge anything, so
15457                 # it cancels all binary package options.
15458                 for opt in ("--getbinpkg", "--getbinpkgonly",
15459                         "--usepkg", "--usepkgonly"):
15460                         myopts.pop(opt, None)
15461
15462         if "--fetch-all-uri" in myopts:
15463                 myopts["--fetchonly"] = True
15464
15465         if "--skipfirst" in myopts and "--resume" not in myopts:
15466                 myopts["--resume"] = True
15467
15468         if ("--getbinpkgonly" in myopts) and not ("--usepkgonly" in myopts):
15469                 myopts["--usepkgonly"] = True
15470
15471         if ("--getbinpkgonly" in myopts) and not ("--getbinpkg" in myopts):
15472                 myopts["--getbinpkg"] = True
15473
15474         if ("--getbinpkg" in myopts) and not ("--usepkg" in myopts):
15475                 myopts["--usepkg"] = True
15476
15477         # Also allow -K to apply --usepkg/-k
15478         if ("--usepkgonly" in myopts) and not ("--usepkg" in myopts):
15479                 myopts["--usepkg"] = True
15480
15481         # Allow -p to remove --ask
15482         if ("--pretend" in myopts) and ("--ask" in myopts):
15483                 print ">>> --pretend disables --ask... removing --ask from options."
15484                 del myopts["--ask"]
15485
15486         # forbid --ask when not in a terminal
15487         # note: this breaks `emerge --ask | tee logfile`, but that doesn't work anyway.
15488         if ("--ask" in myopts) and (not sys.stdin.isatty()):
15489                 portage.writemsg("!!! \"--ask\" should only be used in a terminal. Exiting.\n",
15490                         noiselevel=-1)
15491                 return 1
15492
15493         if settings.get("PORTAGE_DEBUG", "") == "1":
15494                 spinner.update = spinner.update_quiet
15495                 portage.debug=1
15496                 if "python-trace" in settings.features:
15497                         import portage.debug
15498                         portage.debug.set_trace(True)
15499
15500         if not ("--quiet" in myopts):
15501                 if not sys.stdout.isatty() or ("--nospinner" in myopts):
15502                         spinner.update = spinner.update_basic
15503
15504         if myaction == 'version':
15505                 print getportageversion(settings["PORTDIR"], settings["ROOT"],
15506                         settings.profile_path, settings["CHOST"],
15507                         trees[settings["ROOT"]]["vartree"].dbapi)
15508                 return 0
15509         elif "--help" in myopts:
15510                 _emerge.help.help(myaction, myopts, portage.output.havecolor)
15511                 return 0
15512
15513         if "--debug" in myopts:
15514                 print "myaction", myaction
15515                 print "myopts", myopts
15516
15517         if not myaction and not myfiles and "--resume" not in myopts:
15518                 _emerge.help.help(myaction, myopts, portage.output.havecolor)
15519                 return 1
15520
15521         pretend = "--pretend" in myopts
15522         fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
15523         buildpkgonly = "--buildpkgonly" in myopts
15524
15525         # check if root user is the current user for the actions where emerge needs this
15526         if portage.secpass < 2:
15527                 # We've already allowed "--version" and "--help" above.
15528                 if "--pretend" not in myopts and myaction not in ("search","info"):
15529                         need_superuser = not \
15530                                 (fetchonly or \
15531                                 (buildpkgonly and secpass >= 1) or \
15532                                 myaction in ("metadata", "regen") or \
15533                                 (myaction == "sync" and os.access(settings["PORTDIR"], os.W_OK)))
15534                         if portage.secpass < 1 or \
15535                                 need_superuser:
15536                                 if need_superuser:
15537                                         access_desc = "superuser"
15538                                 else:
15539                                         access_desc = "portage group"
15540                                 # Always show portage_group_warning() when only portage group
15541                                 # access is required but the user is not in the portage group.
15542                                 from portage.data import portage_group_warning
15543                                 if "--ask" in myopts:
15544                                         myopts["--pretend"] = True
15545                                         del myopts["--ask"]
15546                                         print ("%s access is required... " + \
15547                                                 "adding --pretend to options.\n") % access_desc
15548                                         if portage.secpass < 1 and not need_superuser:
15549                                                 portage_group_warning()
15550                                 else:
15551                                         sys.stderr.write(("emerge: %s access is " + \
15552                                                 "required.\n\n") % access_desc)
15553                                         if portage.secpass < 1 and not need_superuser:
15554                                                 portage_group_warning()
15555                                         return 1
15556
15557         disable_emergelog = False
15558         for x in ("--pretend", "--fetchonly", "--fetch-all-uri"):
15559                 if x in myopts:
15560                         disable_emergelog = True
15561                         break
15562         if myaction in ("search", "info"):
15563                 disable_emergelog = True
15564         if disable_emergelog:
15565                 """ Disable emergelog for everything except build or unmerge
15566                 operations.  This helps minimize parallel emerge.log entries that can
15567                 confuse log parsers.  We especially want it disabled during
15568                 parallel-fetch, which uses --resume --fetchonly."""
15569                 global emergelog
15570                 def emergelog(*pargs, **kargs):
15571                         pass
15572
15573         if not "--pretend" in myopts:
15574                 emergelog(xterm_titles, "Started emerge on: "+\
15575                         time.strftime("%b %d, %Y %H:%M:%S", time.localtime()))
15576                 myelogstr=""
15577                 if myopts:
15578                         myelogstr=" ".join(myopts)
15579                 if myaction:
15580                         myelogstr+=" "+myaction
15581                 if myfiles:
15582                         myelogstr += " " + " ".join(oldargs)
15583                 emergelog(xterm_titles, " *** emerge " + myelogstr)
15584         del oldargs
15585
15586         def emergeexitsig(signum, frame):
15587                 signal.signal(signal.SIGINT, signal.SIG_IGN)
15588                 signal.signal(signal.SIGTERM, signal.SIG_IGN)
15589                 portage.util.writemsg("\n\nExiting on signal %(signal)s\n" % {"signal":signum})
15590                 sys.exit(100+signum)
15591         signal.signal(signal.SIGINT, emergeexitsig)
15592         signal.signal(signal.SIGTERM, emergeexitsig)
15593
15594         def emergeexit():
15595                 """This gets out final log message in before we quit."""
15596                 if "--pretend" not in myopts:
15597                         emergelog(xterm_titles, " *** terminating.")
15598                 if "notitles" not in settings.features:
15599                         xtermTitleReset()
15600         portage.atexit_register(emergeexit)
15601
15602         if myaction in ("config", "metadata", "regen", "sync"):
15603                 if "--pretend" in myopts:
15604                         sys.stderr.write(("emerge: The '%s' action does " + \
15605                                 "not support '--pretend'.\n") % myaction)
15606                         return 1
15607
15608         if "sync" == myaction:
15609                 return action_sync(settings, trees, mtimedb, myopts, myaction)
15610         elif "metadata" == myaction:
15611                 action_metadata(settings, portdb, myopts)
15612         elif myaction=="regen":
15613                 validate_ebuild_environment(trees)
15614                 return action_regen(settings, portdb, myopts.get("--jobs"),
15615                         myopts.get("--load-average"))
15616         # HELP action
15617         elif "config"==myaction:
15618                 validate_ebuild_environment(trees)
15619                 action_config(settings, trees, myopts, myfiles)
15620
15621         # SEARCH action
15622         elif "search"==myaction:
15623                 validate_ebuild_environment(trees)
15624                 action_search(trees[settings["ROOT"]]["root_config"],
15625                         myopts, myfiles, spinner)
15626         elif myaction in ("clean", "unmerge") or \
15627                 (myaction == "prune" and "--nodeps" in myopts):
15628                 validate_ebuild_environment(trees)
15629
15630                 # Ensure atoms are valid before calling unmerge().
15631                 # For backward compat, leading '=' is not required.
15632                 for x in myfiles:
15633                         if is_valid_package_atom(x) or \
15634                                 is_valid_package_atom("=" + x):
15635                                 continue
15636                         msg = []
15637                         msg.append("'%s' is not a valid package atom." % (x,))
15638                         msg.append("Please check ebuild(5) for full details.")
15639                         writemsg_level("".join("!!! %s\n" % line for line in msg),
15640                                 level=logging.ERROR, noiselevel=-1)
15641                         return 1
15642
15643                 # When given a list of atoms, unmerge
15644                 # them in the order given.
15645                 ordered = myaction == "unmerge"
15646                 if 1 == unmerge(root_config, myopts, myaction, myfiles,
15647                         mtimedb["ldpath"], ordered=ordered):
15648                         if not (buildpkgonly or fetchonly or pretend):
15649                                 post_emerge(root_config, myopts, mtimedb, os.EX_OK)
15650
15651         elif myaction in ("depclean", "info", "prune"):
15652
15653                 # Ensure atoms are valid before calling unmerge().
15654                 vardb = trees[settings["ROOT"]]["vartree"].dbapi
15655                 valid_atoms = []
15656                 for x in myfiles:
15657                         if is_valid_package_atom(x):
15658                                 try:
15659                                         valid_atoms.append(
15660                                                 portage.dep_expand(x, mydb=vardb, settings=settings))
15661                                 except portage.exception.AmbiguousPackageName, e:
15662                                         msg = "The short ebuild name \"" + x + \
15663                                                 "\" is ambiguous.  Please specify " + \
15664                                                 "one of the following " + \
15665                                                 "fully-qualified ebuild names instead:"
15666                                         for line in textwrap.wrap(msg, 70):
15667                                                 writemsg_level("!!! %s\n" % (line,),
15668                                                         level=logging.ERROR, noiselevel=-1)
15669                                         for i in e[0]:
15670                                                 writemsg_level("    %s\n" % colorize("INFORM", i),
15671                                                         level=logging.ERROR, noiselevel=-1)
15672                                         writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
15673                                         return 1
15674                                 continue
15675                         msg = []
15676                         msg.append("'%s' is not a valid package atom." % (x,))
15677                         msg.append("Please check ebuild(5) for full details.")
15678                         writemsg_level("".join("!!! %s\n" % line for line in msg),
15679                                 level=logging.ERROR, noiselevel=-1)
15680                         return 1
15681
15682                 if myaction == "info":
15683                         return action_info(settings, trees, myopts, valid_atoms)
15684
15685                 validate_ebuild_environment(trees)
15686                 action_depclean(settings, trees, mtimedb["ldpath"],
15687                         myopts, myaction, valid_atoms, spinner)
15688                 if not (buildpkgonly or fetchonly or pretend):
15689                         post_emerge(root_config, myopts, mtimedb, os.EX_OK)
15690         # "update", "system", or just process files:
15691         else:
15692                 validate_ebuild_environment(trees)
15693
15694                 for x in myfiles:
15695                         if x.startswith(SETPREFIX) or \
15696                                 is_valid_package_atom(x):
15697                                 continue
15698                         if x[:1] == os.sep:
15699                                 continue
15700                         try:
15701                                 os.lstat(x)
15702                                 continue
15703                         except OSError:
15704                                 pass
15705                         msg = []
15706                         msg.append("'%s' is not a valid package atom." % (x,))
15707                         msg.append("Please check ebuild(5) for full details.")
15708                         writemsg_level("".join("!!! %s\n" % line for line in msg),
15709                                 level=logging.ERROR, noiselevel=-1)
15710                         return 1
15711
15712                 if "--pretend" not in myopts:
15713                         display_news_notification(root_config, myopts)
15714                 retval = action_build(settings, trees, mtimedb,
15715                         myopts, myaction, myfiles, spinner)
15716                 root_config = trees[settings["ROOT"]]["root_config"]
15717                 post_emerge(root_config, myopts, mtimedb, retval)
15718
15719                 return retval