Bug #264435 - Handle EAGAIN errors when writing to stdout, due to poorly
[portage.git] / pym / _emerge / __init__.py
1 #!/usr/bin/python -O
2 # Copyright 1999-2006 Gentoo Foundation
3 # Distributed under the terms of the GNU General Public License v2
4 # $Id$
5
6 import array
7 import codecs
8 from collections import deque
9 import fcntl
10 import formatter
11 import logging
12 import pwd
13 import select
14 import shlex
15 import shutil
16 import signal
17 import sys
18 import textwrap
19 import urlparse
20 import weakref
21 import gc
22 import os, stat
23 import platform
24
25 try:
26         import portage
27 except ImportError:
28         from os import path as osp
29         sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
30         import portage
31
32 from portage import digraph
33 from portage.const import NEWS_LIB_PATH
34
35 import _emerge.help
36 import portage.xpak, commands, errno, re, socket, time
37 from portage.output import blue, bold, colorize, darkblue, darkgreen, darkred, green, \
38         nc_len, red, teal, turquoise, xtermTitle, \
39         xtermTitleReset, yellow
40 from portage.output import create_color_func
41 good = create_color_func("GOOD")
42 bad = create_color_func("BAD")
43 # white looks bad on terminals with white background
44 from portage.output import bold as white
45
46 import portage.elog
47 import portage.dep
48 portage.dep._dep_check_strict = True
49 import portage.util
50 import portage.locks
51 import portage.exception
52 from portage.data import secpass
53 from portage.elog.messages import eerror
54 from portage.util import normalize_path as normpath
55 from portage.util import cmp_sort_key, writemsg, writemsg_level
56 from portage.sets import load_default_config, SETPREFIX
57 from portage.sets.base import InternalPackageSet
58
59 from itertools import chain, izip
60
61 try:
62         import cPickle as pickle
63 except ImportError:
64         import pickle
65
66 try:
67         from cStringIO import StringIO
68 except ImportError:
69         from StringIO import StringIO
70
71 class stdout_spinner(object):
72         scroll_msgs = [
73                 "Gentoo Rocks ("+platform.system()+")",
74                 "Thank you for using Gentoo. :)",
75                 "Are you actually trying to read this?",
76                 "How many times have you stared at this?",
77                 "We are generating the cache right now",
78                 "You are paying too much attention.",
79                 "A theory is better than its explanation.",
80                 "Phasers locked on target, Captain.",
81                 "Thrashing is just virtual crashing.",
82                 "To be is to program.",
83                 "Real Users hate Real Programmers.",
84                 "When all else fails, read the instructions.",
85                 "Functionality breeds Contempt.",
86                 "The future lies ahead.",
87                 "3.1415926535897932384626433832795028841971694",
88                 "Sometimes insanity is the only alternative.",
89                 "Inaccuracy saves a world of explanation.",
90         ]
91
92         twirl_sequence = "/-\\|/-\\|/-\\|/-\\|\\-/|\\-/|\\-/|\\-/|"
93
94         def __init__(self):
95                 self.spinpos = 0
96                 self.update = self.update_twirl
97                 self.scroll_sequence = self.scroll_msgs[
98                         int(time.time() * 100) % len(self.scroll_msgs)]
99                 self.last_update = 0
100                 self.min_display_latency = 0.05
101
102         def _return_early(self):
103                 """
104                 Flushing ouput to the tty too frequently wastes cpu time. Therefore,
105                 each update* method should return without doing any output when this
106                 method returns True.
107                 """
108                 cur_time = time.time()
109                 if cur_time - self.last_update < self.min_display_latency:
110                         return True
111                 self.last_update = cur_time
112                 return False
113
114         def update_basic(self):
115                 self.spinpos = (self.spinpos + 1) % 500
116                 if self._return_early():
117                         return
118                 if (self.spinpos % 100) == 0:
119                         if self.spinpos == 0:
120                                 sys.stdout.write(". ")
121                         else:
122                                 sys.stdout.write(".")
123                 sys.stdout.flush()
124
125         def update_scroll(self):
126                 if self._return_early():
127                         return
128                 if(self.spinpos >= len(self.scroll_sequence)):
129                         sys.stdout.write(darkgreen(" \b\b\b" + self.scroll_sequence[
130                                 len(self.scroll_sequence) - 1 - (self.spinpos % len(self.scroll_sequence))]))
131                 else:
132                         sys.stdout.write(green("\b " + self.scroll_sequence[self.spinpos]))
133                 sys.stdout.flush()
134                 self.spinpos = (self.spinpos + 1) % (2 * len(self.scroll_sequence))
135
136         def update_twirl(self):
137                 self.spinpos = (self.spinpos + 1) % len(self.twirl_sequence)
138                 if self._return_early():
139                         return
140                 sys.stdout.write("\b\b " + self.twirl_sequence[self.spinpos])
141                 sys.stdout.flush()
142
143         def update_quiet(self):
144                 return
145
146 def userquery(prompt, responses=None, colours=None):
147         """Displays a prompt and a set of responses, then waits for a response
148         which is checked against the responses and the first to match is
149         returned.  An empty response will match the first value in responses.  The
150         input buffer is *not* cleared prior to the prompt!
151
152         prompt: a String.
153         responses: a List of Strings.
154         colours: a List of Functions taking and returning a String, used to
155         process the responses for display. Typically these will be functions
156         like red() but could be e.g. lambda x: "DisplayString".
157         If responses is omitted, defaults to ["Yes", "No"], [green, red].
158         If only colours is omitted, defaults to [bold, ...].
159
160         Returns a member of the List responses. (If called without optional
161         arguments, returns "Yes" or "No".)
162         KeyboardInterrupt is converted to SystemExit to avoid tracebacks being
163         printed."""
164         if responses is None:
165                 responses = ["Yes", "No"]
166                 colours = [
167                         create_color_func("PROMPT_CHOICE_DEFAULT"),
168                         create_color_func("PROMPT_CHOICE_OTHER")
169                 ]
170         elif colours is None:
171                 colours=[bold]
172         colours=(colours*len(responses))[:len(responses)]
173         print bold(prompt),
174         try:
175                 while True:
176                         response=raw_input("["+"/".join([colours[i](responses[i]) for i in range(len(responses))])+"] ")
177                         for key in responses:
178                                 # An empty response will match the first value in responses.
179                                 if response.upper()==key[:len(response)].upper():
180                                         return key
181                         print "Sorry, response '%s' not understood." % response,
182         except (EOFError, KeyboardInterrupt):
183                 print "Interrupted."
184                 sys.exit(1)
185
186 actions = frozenset([
187 "clean", "config", "depclean",
188 "info", "list-sets", "metadata",
189 "prune", "regen",  "search",
190 "sync",  "unmerge", "version",
191 ])
192 options=[
193 "--ask",          "--alphabetical",
194 "--buildpkg",     "--buildpkgonly",
195 "--changelog",    "--columns",
196 "--complete-graph",
197 "--debug",        "--deep",
198 "--digest",
199 "--emptytree",
200 "--fetchonly",    "--fetch-all-uri",
201 "--getbinpkg",    "--getbinpkgonly",
202 "--help",         "--ignore-default-opts",
203 "--keep-going",
204 "--noconfmem",
205 "--newuse",       "--nocolor",
206 "--nodeps",       "--noreplace",
207 "--nospinner",    "--oneshot",
208 "--onlydeps",     "--pretend",
209 "--quiet",        "--resume",
210 "--searchdesc",   "--selective",
211 "--skipfirst",
212 "--tree",
213 "--update",
214 "--usepkg",       "--usepkgonly",
215 "--verbose",
216 ]
217
218 shortmapping={
219 "1":"--oneshot",
220 "a":"--ask",
221 "b":"--buildpkg",  "B":"--buildpkgonly",
222 "c":"--clean",     "C":"--unmerge",
223 "d":"--debug",     "D":"--deep",
224 "e":"--emptytree",
225 "f":"--fetchonly", "F":"--fetch-all-uri",
226 "g":"--getbinpkg", "G":"--getbinpkgonly",
227 "h":"--help",
228 "k":"--usepkg",    "K":"--usepkgonly",
229 "l":"--changelog",
230 "n":"--noreplace", "N":"--newuse",
231 "o":"--onlydeps",  "O":"--nodeps",
232 "p":"--pretend",   "P":"--prune",
233 "q":"--quiet",
234 "s":"--search",    "S":"--searchdesc",
235 "t":"--tree",
236 "u":"--update",
237 "v":"--verbose",   "V":"--version"
238 }
239
240 def emergelog(xterm_titles, mystr, short_msg=None):
241         if xterm_titles and short_msg:
242                 if "HOSTNAME" in os.environ:
243                         short_msg = os.environ["HOSTNAME"]+": "+short_msg
244                 xtermTitle(short_msg)
245         try:
246                 file_path = "/var/log/emerge.log"
247                 mylogfile = open(file_path, "a")
248                 portage.util.apply_secpass_permissions(file_path,
249                         uid=portage.portage_uid, gid=portage.portage_gid,
250                         mode=0660)
251                 mylock = None
252                 try:
253                         mylock = portage.locks.lockfile(mylogfile)
254                         # seek because we may have gotten held up by the lock.
255                         # if so, we may not be positioned at the end of the file.
256                         mylogfile.seek(0, 2)
257                         mylogfile.write(str(time.time())[:10]+": "+mystr+"\n")
258                         mylogfile.flush()
259                 finally:
260                         if mylock:
261                                 portage.locks.unlockfile(mylock)
262                         mylogfile.close()
263         except (IOError,OSError,portage.exception.PortageException), e:
264                 if secpass >= 1:
265                         print >> sys.stderr, "emergelog():",e
266
267 def countdown(secs=5, doing="Starting"):
268         if secs:
269                 print ">>> Waiting",secs,"seconds before starting..."
270                 print ">>> (Control-C to abort)...\n"+doing+" in: ",
271                 ticks=range(secs)
272                 ticks.reverse()
273                 for sec in ticks:
274                         sys.stdout.write(colorize("UNMERGE_WARN", str(sec+1)+" "))
275                         sys.stdout.flush()
276                         time.sleep(1)
277                 print
278
279 # formats a size given in bytes nicely
280 def format_size(mysize):
281         if isinstance(mysize, basestring):
282                 return mysize
283         if 0 != mysize % 1024:
284                 # Always round up to the next kB so that it doesn't show 0 kB when
285                 # some small file still needs to be fetched.
286                 mysize += 1024 - mysize % 1024
287         mystr=str(mysize/1024)
288         mycount=len(mystr)
289         while (mycount > 3):
290                 mycount-=3
291                 mystr=mystr[:mycount]+","+mystr[mycount:]
292         return mystr+" kB"
293
294
295 def getgccversion(chost):
296         """
297         rtype: C{str}
298         return:  the current in-use gcc version
299         """
300
301         gcc_ver_command = 'gcc -dumpversion'
302         gcc_ver_prefix = 'gcc-'
303
304         gcc_not_found_error = red(
305         "!!! No gcc found. You probably need to 'source /etc/profile'\n" +
306         "!!! to update the environment of this terminal and possibly\n" +
307         "!!! other terminals also.\n"
308         )
309
310         mystatus, myoutput = commands.getstatusoutput("gcc-config -c")
311         if mystatus == os.EX_OK and myoutput.startswith(chost + "-"):
312                 return myoutput.replace(chost + "-", gcc_ver_prefix, 1)
313
314         mystatus, myoutput = commands.getstatusoutput(
315                 chost + "-" + gcc_ver_command)
316         if mystatus == os.EX_OK:
317                 return gcc_ver_prefix + myoutput
318
319         mystatus, myoutput = commands.getstatusoutput(gcc_ver_command)
320         if mystatus == os.EX_OK:
321                 return gcc_ver_prefix + myoutput
322
323         portage.writemsg(gcc_not_found_error, noiselevel=-1)
324         return "[unavailable]"
325
326 def getportageversion(portdir, target_root, profile, chost, vardb):
327         profilever = "unavailable"
328         if profile:
329                 realpath = os.path.realpath(profile)
330                 basepath   = os.path.realpath(os.path.join(portdir, "profiles"))
331                 if realpath.startswith(basepath):
332                         profilever = realpath[1 + len(basepath):]
333                 else:
334                         try:
335                                 profilever = "!" + os.readlink(profile)
336                         except (OSError):
337                                 pass
338                 del realpath, basepath
339
340         libcver=[]
341         libclist  = vardb.match("virtual/libc")
342         libclist += vardb.match("virtual/glibc")
343         libclist  = portage.util.unique_array(libclist)
344         for x in libclist:
345                 xs=portage.catpkgsplit(x)
346                 if libcver:
347                         libcver+=","+"-".join(xs[1:])
348                 else:
349                         libcver="-".join(xs[1:])
350         if libcver==[]:
351                 libcver="unavailable"
352
353         gccver = getgccversion(chost)
354         unameout=platform.release()+" "+platform.machine()
355
356         return "Portage " + portage.VERSION +" ("+profilever+", "+gccver+", "+libcver+", "+unameout+")"
357
358 def create_depgraph_params(myopts, myaction):
359         #configure emerge engine parameters
360         #
361         # self:      include _this_ package regardless of if it is merged.
362         # selective: exclude the package if it is merged
363         # recurse:   go into the dependencies
364         # deep:      go into the dependencies of already merged packages
365         # empty:     pretend nothing is merged
366         # complete:  completely account for all known dependencies
367         # remove:    build graph for use in removing packages
368         myparams = set(["recurse"])
369
370         if myaction == "remove":
371                 myparams.add("remove")
372                 myparams.add("complete")
373                 return myparams
374
375         if "--update" in myopts or \
376                 "--newuse" in myopts or \
377                 "--reinstall" in myopts or \
378                 "--noreplace" in myopts:
379                 myparams.add("selective")
380         if "--emptytree" in myopts:
381                 myparams.add("empty")
382                 myparams.discard("selective")
383         if "--nodeps" in myopts:
384                 myparams.discard("recurse")
385         if "--deep" in myopts:
386                 myparams.add("deep")
387         if "--complete-graph" in myopts:
388                 myparams.add("complete")
389         return myparams
390
391 # search functionality
392 class search(object):
393
394         #
395         # class constants
396         #
397         VERSION_SHORT=1
398         VERSION_RELEASE=2
399
400         #
401         # public interface
402         #
403         def __init__(self, root_config, spinner, searchdesc,
404                 verbose, usepkg, usepkgonly):
405                 """Searches the available and installed packages for the supplied search key.
406                 The list of available and installed packages is created at object instantiation.
407                 This makes successive searches faster."""
408                 self.settings = root_config.settings
409                 self.vartree = root_config.trees["vartree"]
410                 self.spinner = spinner
411                 self.verbose = verbose
412                 self.searchdesc = searchdesc
413                 self.root_config = root_config
414                 self.setconfig = root_config.setconfig
415                 self.matches = {"pkg" : []}
416                 self.mlen = 0
417
418                 def fake_portdb():
419                         pass
420                 self.portdb = fake_portdb
421                 for attrib in ("aux_get", "cp_all",
422                         "xmatch", "findname", "getFetchMap"):
423                         setattr(fake_portdb, attrib, getattr(self, "_"+attrib))
424
425                 self._dbs = []
426
427                 portdb = root_config.trees["porttree"].dbapi
428                 bindb = root_config.trees["bintree"].dbapi
429                 vardb = root_config.trees["vartree"].dbapi
430
431                 if not usepkgonly and portdb._have_root_eclass_dir:
432                         self._dbs.append(portdb)
433
434                 if (usepkg or usepkgonly) and bindb.cp_all():
435                         self._dbs.append(bindb)
436
437                 self._dbs.append(vardb)
438                 self._portdb = portdb
439
440         def _cp_all(self):
441                 cp_all = set()
442                 for db in self._dbs:
443                         cp_all.update(db.cp_all())
444                 return list(sorted(cp_all))
445
446         def _aux_get(self, *args, **kwargs):
447                 for db in self._dbs:
448                         try:
449                                 return db.aux_get(*args, **kwargs)
450                         except KeyError:
451                                 pass
452                 raise
453
454         def _findname(self, *args, **kwargs):
455                 for db in self._dbs:
456                         if db is not self._portdb:
457                                 # We don't want findname to return anything
458                                 # unless it's an ebuild in a portage tree.
459                                 # Otherwise, it's already built and we don't
460                                 # care about it.
461                                 continue
462                         func = getattr(db, "findname", None)
463                         if func:
464                                 value = func(*args, **kwargs)
465                                 if value:
466                                         return value
467                 return None
468
469         def _getFetchMap(self, *args, **kwargs):
470                 for db in self._dbs:
471                         func = getattr(db, "getFetchMap", None)
472                         if func:
473                                 value = func(*args, **kwargs)
474                                 if value:
475                                         return value
476                 return {}
477
478         def _visible(self, db, cpv, metadata):
479                 installed = db is self.vartree.dbapi
480                 built = installed or db is not self._portdb
481                 pkg_type = "ebuild"
482                 if installed:
483                         pkg_type = "installed"
484                 elif built:
485                         pkg_type = "binary"
486                 return visible(self.settings,
487                         Package(type_name=pkg_type, root_config=self.root_config,
488                         cpv=cpv, built=built, installed=installed, metadata=metadata))
489
490         def _xmatch(self, level, atom):
491                 """
492                 This method does not expand old-style virtuals because it
493                 is restricted to returning matches for a single ${CATEGORY}/${PN}
494                 and old-style virual matches unreliable for that when querying
495                 multiple package databases. If necessary, old-style virtuals
496                 can be performed on atoms prior to calling this method.
497                 """
498                 cp = portage.dep_getkey(atom)
499                 if level == "match-all":
500                         matches = set()
501                         for db in self._dbs:
502                                 if hasattr(db, "xmatch"):
503                                         matches.update(db.xmatch(level, atom))
504                                 else:
505                                         matches.update(db.match(atom))
506                         result = list(x for x in matches if portage.cpv_getkey(x) == cp)
507                         db._cpv_sort_ascending(result)
508                 elif level == "match-visible":
509                         matches = set()
510                         for db in self._dbs:
511                                 if hasattr(db, "xmatch"):
512                                         matches.update(db.xmatch(level, atom))
513                                 else:
514                                         db_keys = list(db._aux_cache_keys)
515                                         for cpv in db.match(atom):
516                                                 metadata = izip(db_keys,
517                                                         db.aux_get(cpv, db_keys))
518                                                 if not self._visible(db, cpv, metadata):
519                                                         continue
520                                                 matches.add(cpv)
521                         result = list(x for x in matches if portage.cpv_getkey(x) == cp)
522                         db._cpv_sort_ascending(result)
523                 elif level == "bestmatch-visible":
524                         result = None
525                         for db in self._dbs:
526                                 if hasattr(db, "xmatch"):
527                                         cpv = db.xmatch("bestmatch-visible", atom)
528                                         if not cpv or portage.cpv_getkey(cpv) != cp:
529                                                 continue
530                                         if not result or cpv == portage.best([cpv, result]):
531                                                 result = cpv
532                                 else:
533                                         db_keys = Package.metadata_keys
534                                         # break out of this loop with highest visible
535                                         # match, checked in descending order
536                                         for cpv in reversed(db.match(atom)):
537                                                 if portage.cpv_getkey(cpv) != cp:
538                                                         continue
539                                                 metadata = izip(db_keys,
540                                                         db.aux_get(cpv, db_keys))
541                                                 if not self._visible(db, cpv, metadata):
542                                                         continue
543                                                 if not result or cpv == portage.best([cpv, result]):
544                                                         result = cpv
545                                                 break
546                 else:
547                         raise NotImplementedError(level)
548                 return result
549
550         def execute(self,searchkey):
551                 """Performs the search for the supplied search key"""
552                 match_category = 0
553                 self.searchkey=searchkey
554                 self.packagematches = []
555                 if self.searchdesc:
556                         self.searchdesc=1
557                         self.matches = {"pkg":[], "desc":[], "set":[]}
558                 else:
559                         self.searchdesc=0
560                         self.matches = {"pkg":[], "set":[]}
561                 print "Searching...   ",
562
563                 regexsearch = False
564                 if self.searchkey.startswith('%'):
565                         regexsearch = True
566                         self.searchkey = self.searchkey[1:]
567                 if self.searchkey.startswith('@'):
568                         match_category = 1
569                         self.searchkey = self.searchkey[1:]
570                 if regexsearch:
571                         self.searchre=re.compile(self.searchkey,re.I)
572                 else:
573                         self.searchre=re.compile(re.escape(self.searchkey), re.I)
574                 for package in self.portdb.cp_all():
575                         self.spinner.update()
576
577                         if match_category:
578                                 match_string  = package[:]
579                         else:
580                                 match_string  = package.split("/")[-1]
581
582                         masked=0
583                         if self.searchre.search(match_string):
584                                 if not self.portdb.xmatch("match-visible", package):
585                                         masked=1
586                                 self.matches["pkg"].append([package,masked])
587                         elif self.searchdesc: # DESCRIPTION searching
588                                 full_package = self.portdb.xmatch("bestmatch-visible", package)
589                                 if not full_package:
590                                         #no match found; we don't want to query description
591                                         full_package = portage.best(
592                                                 self.portdb.xmatch("match-all", package))
593                                         if not full_package:
594                                                 continue
595                                         else:
596                                                 masked=1
597                                 try:
598                                         full_desc = self.portdb.aux_get(
599                                                 full_package, ["DESCRIPTION"])[0]
600                                 except KeyError:
601                                         print "emerge: search: aux_get() failed, skipping"
602                                         continue
603                                 if self.searchre.search(full_desc):
604                                         self.matches["desc"].append([full_package,masked])
605
606                 self.sdict = self.setconfig.getSets()
607                 for setname in self.sdict:
608                         self.spinner.update()
609                         if match_category:
610                                 match_string = setname
611                         else:
612                                 match_string = setname.split("/")[-1]
613                         
614                         if self.searchre.search(match_string):
615                                 self.matches["set"].append([setname, False])
616                         elif self.searchdesc:
617                                 if self.searchre.search(
618                                         self.sdict[setname].getMetadata("DESCRIPTION")):
619                                         self.matches["set"].append([setname, False])
620                         
621                 self.mlen=0
622                 for mtype in self.matches:
623                         self.matches[mtype].sort()
624                         self.mlen += len(self.matches[mtype])
625
626         def addCP(self, cp):
627                 if not self.portdb.xmatch("match-all", cp):
628                         return
629                 masked = 0
630                 if not self.portdb.xmatch("bestmatch-visible", cp):
631                         masked = 1
632                 self.matches["pkg"].append([cp, masked])
633                 self.mlen += 1
634
635         def output(self):
636                 """Outputs the results of the search."""
637                 print "\b\b  \n[ Results for search key : "+white(self.searchkey)+" ]"
638                 print "[ Applications found : "+white(str(self.mlen))+" ]"
639                 print " "
640                 vardb = self.vartree.dbapi
641                 for mtype in self.matches:
642                         for match,masked in self.matches[mtype]:
643                                 full_package = None
644                                 if mtype == "pkg":
645                                         catpack = match
646                                         full_package = self.portdb.xmatch(
647                                                 "bestmatch-visible", match)
648                                         if not full_package:
649                                                 #no match found; we don't want to query description
650                                                 masked=1
651                                                 full_package = portage.best(
652                                                         self.portdb.xmatch("match-all",match))
653                                 elif mtype == "desc":
654                                         full_package = match
655                                         match        = portage.cpv_getkey(match)
656                                 elif mtype == "set":
657                                         print green("*")+"  "+white(match)
658                                         print "     ", darkgreen("Description:")+"  ", self.sdict[match].getMetadata("DESCRIPTION")
659                                         print
660                                 if full_package:
661                                         try:
662                                                 desc, homepage, license = self.portdb.aux_get(
663                                                         full_package, ["DESCRIPTION","HOMEPAGE","LICENSE"])
664                                         except KeyError:
665                                                 print "emerge: search: aux_get() failed, skipping"
666                                                 continue
667                                         if masked:
668                                                 print green("*")+"  "+white(match)+" "+red("[ Masked ]")
669                                         else:
670                                                 print green("*")+"  "+white(match)
671                                         myversion = self.getVersion(full_package, search.VERSION_RELEASE)
672
673                                         mysum = [0,0]
674                                         file_size_str = None
675                                         mycat = match.split("/")[0]
676                                         mypkg = match.split("/")[1]
677                                         mycpv = match + "-" + myversion
678                                         myebuild = self.portdb.findname(mycpv)
679                                         if myebuild:
680                                                 pkgdir = os.path.dirname(myebuild)
681                                                 from portage import manifest
682                                                 mf = manifest.Manifest(
683                                                         pkgdir, self.settings["DISTDIR"])
684                                                 try:
685                                                         uri_map = self.portdb.getFetchMap(mycpv)
686                                                 except portage.exception.InvalidDependString, e:
687                                                         file_size_str = "Unknown (%s)" % (e,)
688                                                         del e
689                                                 else:
690                                                         try:
691                                                                 mysum[0] = mf.getDistfilesSize(uri_map)
692                                                         except KeyError, e:
693                                                                 file_size_str = "Unknown (missing " + \
694                                                                         "digest for %s)" % (e,)
695                                                                 del e
696
697                                         available = False
698                                         for db in self._dbs:
699                                                 if db is not vardb and \
700                                                         db.cpv_exists(mycpv):
701                                                         available = True
702                                                         if not myebuild and hasattr(db, "bintree"):
703                                                                 myebuild = db.bintree.getname(mycpv)
704                                                                 try:
705                                                                         mysum[0] = os.stat(myebuild).st_size
706                                                                 except OSError:
707                                                                         myebuild = None
708                                                         break
709
710                                         if myebuild and file_size_str is None:
711                                                 mystr = str(mysum[0] / 1024)
712                                                 mycount = len(mystr)
713                                                 while (mycount > 3):
714                                                         mycount -= 3
715                                                         mystr = mystr[:mycount] + "," + mystr[mycount:]
716                                                 file_size_str = mystr + " kB"
717
718                                         if self.verbose:
719                                                 if available:
720                                                         print "     ", darkgreen("Latest version available:"),myversion
721                                                 print "     ", self.getInstallationStatus(mycat+'/'+mypkg)
722                                                 if myebuild:
723                                                         print "      %s %s" % \
724                                                                 (darkgreen("Size of files:"), file_size_str)
725                                                 print "     ", darkgreen("Homepage:")+"     ",homepage
726                                                 print "     ", darkgreen("Description:")+"  ",desc
727                                                 print "     ", darkgreen("License:")+"      ",license
728                                                 print
729         #
730         # private interface
731         #
732         def getInstallationStatus(self,package):
733                 installed_package = self.vartree.dep_bestmatch(package)
734                 result = ""
735                 version = self.getVersion(installed_package,search.VERSION_RELEASE)
736                 if len(version) > 0:
737                         result = darkgreen("Latest version installed:")+" "+version
738                 else:
739                         result = darkgreen("Latest version installed:")+" [ Not Installed ]"
740                 return result
741
742         def getVersion(self,full_package,detail):
743                 if len(full_package) > 1:
744                         package_parts = portage.catpkgsplit(full_package)
745                         if detail == search.VERSION_RELEASE and package_parts[3] != 'r0':
746                                 result = package_parts[2]+ "-" + package_parts[3]
747                         else:
748                                 result = package_parts[2]
749                 else:
750                         result = ""
751                 return result
752
753 class RootConfig(object):
754         """This is used internally by depgraph to track information about a
755         particular $ROOT."""
756
757         pkg_tree_map = {
758                 "ebuild"    : "porttree",
759                 "binary"    : "bintree",
760                 "installed" : "vartree"
761         }
762
763         tree_pkg_map = {}
764         for k, v in pkg_tree_map.iteritems():
765                 tree_pkg_map[v] = k
766
767         def __init__(self, settings, trees, setconfig):
768                 self.trees = trees
769                 self.settings = settings
770                 self.iuse_implicit = tuple(sorted(settings._get_implicit_iuse()))
771                 self.root = self.settings["ROOT"]
772                 self.setconfig = setconfig
773                 if setconfig is None:
774                         self.sets = {}
775                 else:
776                         self.sets = self.setconfig.getSets()
777                 self.visible_pkgs = PackageVirtualDbapi(self.settings)
778
779 def create_world_atom(pkg, args_set, root_config):
780         """Create a new atom for the world file if one does not exist.  If the
781         argument atom is precise enough to identify a specific slot then a slot
782         atom will be returned. Atoms that are in the system set may also be stored
783         in world since system atoms can only match one slot while world atoms can
784         be greedy with respect to slots.  Unslotted system packages will not be
785         stored in world."""
786
787         arg_atom = args_set.findAtomForPackage(pkg)
788         if not arg_atom:
789                 return None
790         cp = portage.dep_getkey(arg_atom)
791         new_world_atom = cp
792         sets = root_config.sets
793         portdb = root_config.trees["porttree"].dbapi
794         vardb = root_config.trees["vartree"].dbapi
795         available_slots = set(portdb.aux_get(cpv, ["SLOT"])[0] \
796                 for cpv in portdb.match(cp))
797         slotted = len(available_slots) > 1 or \
798                 (len(available_slots) == 1 and "0" not in available_slots)
799         if not slotted:
800                 # check the vdb in case this is multislot
801                 available_slots = set(vardb.aux_get(cpv, ["SLOT"])[0] \
802                         for cpv in vardb.match(cp))
803                 slotted = len(available_slots) > 1 or \
804                         (len(available_slots) == 1 and "0" not in available_slots)
805         if slotted and arg_atom != cp:
806                 # If the user gave a specific atom, store it as a
807                 # slot atom in the world file.
808                 slot_atom = pkg.slot_atom
809
810                 # For USE=multislot, there are a couple of cases to
811                 # handle here:
812                 #
813                 # 1) SLOT="0", but the real SLOT spontaneously changed to some
814                 #    unknown value, so just record an unslotted atom.
815                 #
816                 # 2) SLOT comes from an installed package and there is no
817                 #    matching SLOT in the portage tree.
818                 #
819                 # Make sure that the slot atom is available in either the
820                 # portdb or the vardb, since otherwise the user certainly
821                 # doesn't want the SLOT atom recorded in the world file
822                 # (case 1 above).  If it's only available in the vardb,
823                 # the user may be trying to prevent a USE=multislot
824                 # package from being removed by --depclean (case 2 above).
825
826                 mydb = portdb
827                 if not portdb.match(slot_atom):
828                         # SLOT seems to come from an installed multislot package
829                         mydb = vardb
830                 # If there is no installed package matching the SLOT atom,
831                 # it probably changed SLOT spontaneously due to USE=multislot,
832                 # so just record an unslotted atom.
833                 if vardb.match(slot_atom):
834                         # Now verify that the argument is precise
835                         # enough to identify a specific slot.
836                         matches = mydb.match(arg_atom)
837                         matched_slots = set()
838                         for cpv in matches:
839                                 matched_slots.add(mydb.aux_get(cpv, ["SLOT"])[0])
840                         if len(matched_slots) == 1:
841                                 new_world_atom = slot_atom
842
843         if new_world_atom == sets["world"].findAtomForPackage(pkg):
844                 # Both atoms would be identical, so there's nothing to add.
845                 return None
846         if not slotted:
847                 # Unlike world atoms, system atoms are not greedy for slots, so they
848                 # can't be safely excluded from world if they are slotted.
849                 system_atom = sets["system"].findAtomForPackage(pkg)
850                 if system_atom:
851                         if not portage.dep_getkey(system_atom).startswith("virtual/"):
852                                 return None
853                         # System virtuals aren't safe to exclude from world since they can
854                         # match multiple old-style virtuals but only one of them will be
855                         # pulled in by update or depclean.
856                         providers = portdb.mysettings.getvirtuals().get(
857                                 portage.dep_getkey(system_atom))
858                         if providers and len(providers) == 1 and providers[0] == cp:
859                                 return None
860         return new_world_atom
861
862 def filter_iuse_defaults(iuse):
863         for flag in iuse:
864                 if flag.startswith("+") or flag.startswith("-"):
865                         yield flag[1:]
866                 else:
867                         yield flag
868
869 class SlotObject(object):
870         __slots__ = ("__weakref__",)
871
872         def __init__(self, **kwargs):
873                 classes = [self.__class__]
874                 while classes:
875                         c = classes.pop()
876                         if c is SlotObject:
877                                 continue
878                         classes.extend(c.__bases__)
879                         slots = getattr(c, "__slots__", None)
880                         if not slots:
881                                 continue
882                         for myattr in slots:
883                                 myvalue = kwargs.get(myattr, None)
884                                 setattr(self, myattr, myvalue)
885
886         def copy(self):
887                 """
888                 Create a new instance and copy all attributes
889                 defined from __slots__ (including those from
890                 inherited classes).
891                 """
892                 obj = self.__class__()
893
894                 classes = [self.__class__]
895                 while classes:
896                         c = classes.pop()
897                         if c is SlotObject:
898                                 continue
899                         classes.extend(c.__bases__)
900                         slots = getattr(c, "__slots__", None)
901                         if not slots:
902                                 continue
903                         for myattr in slots:
904                                 setattr(obj, myattr, getattr(self, myattr))
905
906                 return obj
907
908 class AbstractDepPriority(SlotObject):
909         __slots__ = ("buildtime", "runtime", "runtime_post")
910
911         def __lt__(self, other):
912                 return self.__int__() < other
913
914         def __le__(self, other):
915                 return self.__int__() <= other
916
917         def __eq__(self, other):
918                 return self.__int__() == other
919
920         def __ne__(self, other):
921                 return self.__int__() != other
922
923         def __gt__(self, other):
924                 return self.__int__() > other
925
926         def __ge__(self, other):
927                 return self.__int__() >= other
928
929         def copy(self):
930                 import copy
931                 return copy.copy(self)
932
933 class DepPriority(AbstractDepPriority):
934
935         __slots__ = ("satisfied", "optional", "rebuild")
936
937         def __int__(self):
938                 return 0
939
940         def __str__(self):
941                 if self.optional:
942                         return "optional"
943                 if self.buildtime:
944                         return "buildtime"
945                 if self.runtime:
946                         return "runtime"
947                 if self.runtime_post:
948                         return "runtime_post"
949                 return "soft"
950
951 class BlockerDepPriority(DepPriority):
952         __slots__ = ()
953         def __int__(self):
954                 return 0
955
956         def __str__(self):
957                 return 'blocker'
958
959 BlockerDepPriority.instance = BlockerDepPriority()
960
961 class UnmergeDepPriority(AbstractDepPriority):
962         __slots__ = ("optional", "satisfied",)
963         """
964         Combination of properties           Priority  Category
965
966         runtime                                0       HARD
967         runtime_post                          -1       HARD
968         buildtime                             -2       SOFT
969         (none of the above)                   -2       SOFT
970         """
971
972         MAX    =  0
973         SOFT   = -2
974         MIN    = -2
975
976         def __int__(self):
977                 if self.runtime:
978                         return 0
979                 if self.runtime_post:
980                         return -1
981                 if self.buildtime:
982                         return -2
983                 return -2
984
985         def __str__(self):
986                 myvalue = self.__int__()
987                 if myvalue > self.SOFT:
988                         return "hard"
989                 return "soft"
990
991 class DepPriorityNormalRange(object):
992         """
993         DepPriority properties              Index      Category
994
995         buildtime                                      HARD
996         runtime                                3       MEDIUM
997         runtime_post                           2       MEDIUM_SOFT
998         optional                               1       SOFT
999         (none of the above)                    0       NONE
1000         """
1001         MEDIUM      = 3
1002         MEDIUM_SOFT = 2
1003         SOFT        = 1
1004         NONE        = 0
1005
1006         @classmethod
1007         def _ignore_optional(cls, priority):
1008                 if priority.__class__ is not DepPriority:
1009                         return False
1010                 return bool(priority.optional)
1011
1012         @classmethod
1013         def _ignore_runtime_post(cls, priority):
1014                 if priority.__class__ is not DepPriority:
1015                         return False
1016                 return bool(priority.optional or priority.runtime_post)
1017
1018         @classmethod
1019         def _ignore_runtime(cls, priority):
1020                 if priority.__class__ is not DepPriority:
1021                         return False
1022                 return not priority.buildtime
1023
1024         ignore_medium      = _ignore_runtime
1025         ignore_medium_soft = _ignore_runtime_post
1026         ignore_soft        = _ignore_optional
1027
1028 DepPriorityNormalRange.ignore_priority = (
1029         None,
1030         DepPriorityNormalRange._ignore_optional,
1031         DepPriorityNormalRange._ignore_runtime_post,
1032         DepPriorityNormalRange._ignore_runtime
1033 )
1034
1035 class DepPrioritySatisfiedRange(object):
1036         """
1037         DepPriority                         Index      Category
1038
1039         not satisfied and buildtime                    HARD
1040         not satisfied and runtime              7       MEDIUM
1041         not satisfied and runtime_post         6       MEDIUM_SOFT
1042         satisfied and buildtime and rebuild    5       SOFT
1043         satisfied and buildtime                4       SOFT
1044         satisfied and runtime                  3       SOFT
1045         satisfied and runtime_post             2       SOFT
1046         optional                               1       SOFT
1047         (none of the above)                    0       NONE
1048         """
1049         MEDIUM      = 7
1050         MEDIUM_SOFT = 6
1051         SOFT        = 5
1052         NONE        = 0
1053
1054         @classmethod
1055         def _ignore_optional(cls, priority):
1056                 if priority.__class__ is not DepPriority:
1057                         return False
1058                 return bool(priority.optional)
1059
1060         @classmethod
1061         def _ignore_satisfied_runtime_post(cls, priority):
1062                 if priority.__class__ is not DepPriority:
1063                         return False
1064                 if priority.optional:
1065                         return True
1066                 if not priority.satisfied:
1067                         return False
1068                 return bool(priority.runtime_post)
1069
1070         @classmethod
1071         def _ignore_satisfied_runtime(cls, priority):
1072                 if priority.__class__ is not DepPriority:
1073                         return False
1074                 if priority.optional:
1075                         return True
1076                 if not priority.satisfied:
1077                         return False
1078                 return not priority.buildtime
1079
1080         @classmethod
1081         def _ignore_satisfied_buildtime(cls, priority):
1082                 if priority.__class__ is not DepPriority:
1083                         return False
1084                 if priority.optional:
1085                         return True
1086                 if not priority.satisfied:
1087                         return False
1088                 if priority.buildtime:
1089                         return not priority.rebuild
1090                 return True
1091
1092         @classmethod
1093         def _ignore_satisfied_buildtime_rebuild(cls, priority):
1094                 if priority.__class__ is not DepPriority:
1095                         return False
1096                 if priority.optional:
1097                         return True
1098                 return bool(priority.satisfied)
1099
1100         @classmethod
1101         def _ignore_runtime_post(cls, priority):
1102                 if priority.__class__ is not DepPriority:
1103                         return False
1104                 return bool(priority.optional or \
1105                         priority.satisfied or \
1106                         priority.runtime_post)
1107
1108         @classmethod
1109         def _ignore_runtime(cls, priority):
1110                 if priority.__class__ is not DepPriority:
1111                         return False
1112                 return bool(priority.satisfied or \
1113                         not priority.buildtime)
1114
1115         ignore_medium      = _ignore_runtime
1116         ignore_medium_soft = _ignore_runtime_post
1117         ignore_soft        = _ignore_satisfied_buildtime_rebuild
1118
1119 DepPrioritySatisfiedRange.ignore_priority = (
1120         None,
1121         DepPrioritySatisfiedRange._ignore_optional,
1122         DepPrioritySatisfiedRange._ignore_satisfied_runtime_post,
1123         DepPrioritySatisfiedRange._ignore_satisfied_runtime,
1124         DepPrioritySatisfiedRange._ignore_satisfied_buildtime,
1125         DepPrioritySatisfiedRange._ignore_satisfied_buildtime_rebuild,
1126         DepPrioritySatisfiedRange._ignore_runtime_post,
1127         DepPrioritySatisfiedRange._ignore_runtime
1128 )
1129
1130 def _find_deep_system_runtime_deps(graph):
1131         deep_system_deps = set()
1132         node_stack = []
1133         for node in graph:
1134                 if not isinstance(node, Package) or \
1135                         node.operation == 'uninstall':
1136                         continue
1137                 if node.root_config.sets['system'].findAtomForPackage(node):
1138                         node_stack.append(node)
1139
1140         def ignore_priority(priority):
1141                 """
1142                 Ignore non-runtime priorities.
1143                 """
1144                 if isinstance(priority, DepPriority) and \
1145                         (priority.runtime or priority.runtime_post):
1146                         return False
1147                 return True
1148
1149         while node_stack:
1150                 node = node_stack.pop()
1151                 if node in deep_system_deps:
1152                         continue
1153                 deep_system_deps.add(node)
1154                 for child in graph.child_nodes(node, ignore_priority=ignore_priority):
1155                         if not isinstance(child, Package) or \
1156                                 child.operation == 'uninstall':
1157                                 continue
1158                         node_stack.append(child)
1159
1160         return deep_system_deps
1161
1162 class FakeVartree(portage.vartree):
1163         """This is implements an in-memory copy of a vartree instance that provides
1164         all the interfaces required for use by the depgraph.  The vardb is locked
1165         during the constructor call just long enough to read a copy of the
1166         installed package information.  This allows the depgraph to do it's
1167         dependency calculations without holding a lock on the vardb.  It also
1168         allows things like vardb global updates to be done in memory so that the
1169         user doesn't necessarily need write access to the vardb in cases where
1170         global updates are necessary (updates are performed when necessary if there
1171         is not a matching ebuild in the tree)."""
1172         def __init__(self, root_config, pkg_cache=None, acquire_lock=1):
1173                 self._root_config = root_config
1174                 if pkg_cache is None:
1175                         pkg_cache = {}
1176                 real_vartree = root_config.trees["vartree"]
1177                 portdb = root_config.trees["porttree"].dbapi
1178                 self.root = real_vartree.root
1179                 self.settings = real_vartree.settings
1180                 mykeys = list(real_vartree.dbapi._aux_cache_keys)
1181                 if "_mtime_" not in mykeys:
1182                         mykeys.append("_mtime_")
1183                 self._db_keys = mykeys
1184                 self._pkg_cache = pkg_cache
1185                 self.dbapi = PackageVirtualDbapi(real_vartree.settings)
1186                 vdb_path = os.path.join(self.root, portage.VDB_PATH)
1187                 try:
1188                         # At least the parent needs to exist for the lock file.
1189                         portage.util.ensure_dirs(vdb_path)
1190                 except portage.exception.PortageException:
1191                         pass
1192                 vdb_lock = None
1193                 try:
1194                         if acquire_lock and os.access(vdb_path, os.W_OK):
1195                                 vdb_lock = portage.locks.lockdir(vdb_path)
1196                         real_dbapi = real_vartree.dbapi
1197                         slot_counters = {}
1198                         for cpv in real_dbapi.cpv_all():
1199                                 cache_key = ("installed", self.root, cpv, "nomerge")
1200                                 pkg = self._pkg_cache.get(cache_key)
1201                                 if pkg is not None:
1202                                         metadata = pkg.metadata
1203                                 else:
1204                                         metadata = dict(izip(mykeys, real_dbapi.aux_get(cpv, mykeys)))
1205                                 myslot = metadata["SLOT"]
1206                                 mycp = portage.dep_getkey(cpv)
1207                                 myslot_atom = "%s:%s" % (mycp, myslot)
1208                                 try:
1209                                         mycounter = long(metadata["COUNTER"])
1210                                 except ValueError:
1211                                         mycounter = 0
1212                                         metadata["COUNTER"] = str(mycounter)
1213                                 other_counter = slot_counters.get(myslot_atom, None)
1214                                 if other_counter is not None:
1215                                         if other_counter > mycounter:
1216                                                 continue
1217                                 slot_counters[myslot_atom] = mycounter
1218                                 if pkg is None:
1219                                         pkg = Package(built=True, cpv=cpv,
1220                                                 installed=True, metadata=metadata,
1221                                                 root_config=root_config, type_name="installed")
1222                                 self._pkg_cache[pkg] = pkg
1223                                 self.dbapi.cpv_inject(pkg)
1224                         real_dbapi.flush_cache()
1225                 finally:
1226                         if vdb_lock:
1227                                 portage.locks.unlockdir(vdb_lock)
1228                 # Populate the old-style virtuals using the cached values.
1229                 if not self.settings.treeVirtuals:
1230                         self.settings.treeVirtuals = portage.util.map_dictlist_vals(
1231                                 portage.getCPFromCPV, self.get_all_provides())
1232
1233                 # Intialize variables needed for lazy cache pulls of the live ebuild
1234                 # metadata.  This ensures that the vardb lock is released ASAP, without
1235                 # being delayed in case cache generation is triggered.
1236                 self._aux_get = self.dbapi.aux_get
1237                 self.dbapi.aux_get = self._aux_get_wrapper
1238                 self._match = self.dbapi.match
1239                 self.dbapi.match = self._match_wrapper
1240                 self._aux_get_history = set()
1241                 self._portdb_keys = ["EAPI", "DEPEND", "RDEPEND", "PDEPEND"]
1242                 self._portdb = portdb
1243                 self._global_updates = None
1244
1245         def _match_wrapper(self, cpv, use_cache=1):
1246                 """
1247                 Make sure the metadata in Package instances gets updated for any
1248                 cpv that is returned from a match() call, since the metadata can
1249                 be accessed directly from the Package instance instead of via
1250                 aux_get().
1251                 """
1252                 matches = self._match(cpv, use_cache=use_cache)
1253                 for cpv in matches:
1254                         if cpv in self._aux_get_history:
1255                                 continue
1256                         self._aux_get_wrapper(cpv, [])
1257                 return matches
1258
1259         def _aux_get_wrapper(self, pkg, wants):
1260                 if pkg in self._aux_get_history:
1261                         return self._aux_get(pkg, wants)
1262                 self._aux_get_history.add(pkg)
1263                 try:
1264                         # Use the live ebuild metadata if possible.
1265                         live_metadata = dict(izip(self._portdb_keys,
1266                                 self._portdb.aux_get(pkg, self._portdb_keys)))
1267                         if not portage.eapi_is_supported(live_metadata["EAPI"]):
1268                                 raise KeyError(pkg)
1269                         self.dbapi.aux_update(pkg, live_metadata)
1270                 except (KeyError, portage.exception.PortageException):
1271                         if self._global_updates is None:
1272                                 self._global_updates = \
1273                                         grab_global_updates(self._portdb.porttree_root)
1274                         perform_global_updates(
1275                                 pkg, self.dbapi, self._global_updates)
1276                 return self._aux_get(pkg, wants)
1277
1278         def sync(self, acquire_lock=1):
1279                 """
1280                 Call this method to synchronize state with the real vardb
1281                 after one or more packages may have been installed or
1282                 uninstalled.
1283                 """
1284                 vdb_path = os.path.join(self.root, portage.VDB_PATH)
1285                 try:
1286                         # At least the parent needs to exist for the lock file.
1287                         portage.util.ensure_dirs(vdb_path)
1288                 except portage.exception.PortageException:
1289                         pass
1290                 vdb_lock = None
1291                 try:
1292                         if acquire_lock and os.access(vdb_path, os.W_OK):
1293                                 vdb_lock = portage.locks.lockdir(vdb_path)
1294                         self._sync()
1295                 finally:
1296                         if vdb_lock:
1297                                 portage.locks.unlockdir(vdb_lock)
1298
1299         def _sync(self):
1300
1301                 real_vardb = self._root_config.trees["vartree"].dbapi
1302                 current_cpv_set = frozenset(real_vardb.cpv_all())
1303                 pkg_vardb = self.dbapi
1304                 aux_get_history = self._aux_get_history
1305
1306                 # Remove any packages that have been uninstalled.
1307                 for pkg in list(pkg_vardb):
1308                         if pkg.cpv not in current_cpv_set:
1309                                 pkg_vardb.cpv_remove(pkg)
1310                                 aux_get_history.discard(pkg.cpv)
1311
1312                 # Validate counters and timestamps.
1313                 slot_counters = {}
1314                 root = self.root
1315                 validation_keys = ["COUNTER", "_mtime_"]
1316                 for cpv in current_cpv_set:
1317
1318                         pkg_hash_key = ("installed", root, cpv, "nomerge")
1319                         pkg = pkg_vardb.get(pkg_hash_key)
1320                         if pkg is not None:
1321                                 counter, mtime = real_vardb.aux_get(cpv, validation_keys)
1322                                 try:
1323                                         counter = long(counter)
1324                                 except ValueError:
1325                                         counter = 0
1326
1327                                 if counter != pkg.counter or \
1328                                         mtime != pkg.mtime:
1329                                         pkg_vardb.cpv_remove(pkg)
1330                                         aux_get_history.discard(pkg.cpv)
1331                                         pkg = None
1332
1333                         if pkg is None:
1334                                 pkg = self._pkg(cpv)
1335
1336                         other_counter = slot_counters.get(pkg.slot_atom)
1337                         if other_counter is not None:
1338                                 if other_counter > pkg.counter:
1339                                         continue
1340
1341                         slot_counters[pkg.slot_atom] = pkg.counter
1342                         pkg_vardb.cpv_inject(pkg)
1343
1344                 real_vardb.flush_cache()
1345
1346         def _pkg(self, cpv):
1347                 root_config = self._root_config
1348                 real_vardb = root_config.trees["vartree"].dbapi
1349                 pkg = Package(cpv=cpv, installed=True,
1350                         metadata=izip(self._db_keys,
1351                         real_vardb.aux_get(cpv, self._db_keys)),
1352                         root_config=root_config,
1353                         type_name="installed")
1354
1355                 try:
1356                         mycounter = long(pkg.metadata["COUNTER"])
1357                 except ValueError:
1358                         mycounter = 0
1359                         pkg.metadata["COUNTER"] = str(mycounter)
1360
1361                 return pkg
1362
1363 def grab_global_updates(portdir):
1364         from portage.update import grab_updates, parse_updates
1365         updpath = os.path.join(portdir, "profiles", "updates")
1366         try:
1367                 rawupdates = grab_updates(updpath)
1368         except portage.exception.DirectoryNotFound:
1369                 rawupdates = []
1370         upd_commands = []
1371         for mykey, mystat, mycontent in rawupdates:
1372                 commands, errors = parse_updates(mycontent)
1373                 upd_commands.extend(commands)
1374         return upd_commands
1375
1376 def perform_global_updates(mycpv, mydb, mycommands):
1377         from portage.update import update_dbentries
1378         aux_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
1379         aux_dict = dict(izip(aux_keys, mydb.aux_get(mycpv, aux_keys)))
1380         updates = update_dbentries(mycommands, aux_dict)
1381         if updates:
1382                 mydb.aux_update(mycpv, updates)
1383
1384 def visible(pkgsettings, pkg):
1385         """
1386         Check if a package is visible. This can raise an InvalidDependString
1387         exception if LICENSE is invalid.
1388         TODO: optionally generate a list of masking reasons
1389         @rtype: Boolean
1390         @returns: True if the package is visible, False otherwise.
1391         """
1392         if not pkg.metadata["SLOT"]:
1393                 return False
1394         if not pkg.installed:
1395                 if not pkgsettings._accept_chost(pkg.cpv, pkg.metadata):
1396                         return False
1397         eapi = pkg.metadata["EAPI"]
1398         if not portage.eapi_is_supported(eapi):
1399                 return False
1400         if not pkg.installed:
1401                 if portage._eapi_is_deprecated(eapi):
1402                         return False
1403                 if pkgsettings._getMissingKeywords(pkg.cpv, pkg.metadata):
1404                         return False
1405         if pkgsettings._getMaskAtom(pkg.cpv, pkg.metadata):
1406                 return False
1407         if pkgsettings._getProfileMaskAtom(pkg.cpv, pkg.metadata):
1408                 return False
1409         try:
1410                 if pkgsettings._getMissingLicenses(pkg.cpv, pkg.metadata):
1411                         return False
1412         except portage.exception.InvalidDependString:
1413                 return False
1414         return True
1415
1416 def get_masking_status(pkg, pkgsettings, root_config):
1417
1418         mreasons = portage.getmaskingstatus(
1419                 pkg, settings=pkgsettings,
1420                 portdb=root_config.trees["porttree"].dbapi)
1421
1422         if not pkg.installed:
1423                 if not pkgsettings._accept_chost(pkg.cpv, pkg.metadata):
1424                         mreasons.append("CHOST: %s" % \
1425                                 pkg.metadata["CHOST"])
1426
1427         if not pkg.metadata["SLOT"]:
1428                 mreasons.append("invalid: SLOT is undefined")
1429
1430         return mreasons
1431
1432 def get_mask_info(root_config, cpv, pkgsettings,
1433         db, pkg_type, built, installed, db_keys):
1434         eapi_masked = False
1435         try:
1436                 metadata = dict(izip(db_keys,
1437                         db.aux_get(cpv, db_keys)))
1438         except KeyError:
1439                 metadata = None
1440         if metadata and not built:
1441                 pkgsettings.setcpv(cpv, mydb=metadata)
1442                 metadata["USE"] = pkgsettings["PORTAGE_USE"]
1443                 metadata['CHOST'] = pkgsettings.get('CHOST', '')
1444         if metadata is None:
1445                 mreasons = ["corruption"]
1446         else:
1447                 eapi = metadata['EAPI']
1448                 if eapi[:1] == '-':
1449                         eapi = eapi[1:]
1450                 if not portage.eapi_is_supported(eapi):
1451                         mreasons = ['EAPI %s' % eapi]
1452                 else:
1453                         pkg = Package(type_name=pkg_type, root_config=root_config,
1454                                 cpv=cpv, built=built, installed=installed, metadata=metadata)
1455                         mreasons = get_masking_status(pkg, pkgsettings, root_config)
1456         return metadata, mreasons
1457
1458 def show_masked_packages(masked_packages):
1459         shown_licenses = set()
1460         shown_comments = set()
1461         # Maybe there is both an ebuild and a binary. Only
1462         # show one of them to avoid redundant appearance.
1463         shown_cpvs = set()
1464         have_eapi_mask = False
1465         for (root_config, pkgsettings, cpv,
1466                 metadata, mreasons) in masked_packages:
1467                 if cpv in shown_cpvs:
1468                         continue
1469                 shown_cpvs.add(cpv)
1470                 comment, filename = None, None
1471                 if "package.mask" in mreasons:
1472                         comment, filename = \
1473                                 portage.getmaskingreason(
1474                                 cpv, metadata=metadata,
1475                                 settings=pkgsettings,
1476                                 portdb=root_config.trees["porttree"].dbapi,
1477                                 return_location=True)
1478                 missing_licenses = []
1479                 if metadata:
1480                         if not portage.eapi_is_supported(metadata["EAPI"]):
1481                                 have_eapi_mask = True
1482                         try:
1483                                 missing_licenses = \
1484                                         pkgsettings._getMissingLicenses(
1485                                                 cpv, metadata)
1486                         except portage.exception.InvalidDependString:
1487                                 # This will have already been reported
1488                                 # above via mreasons.
1489                                 pass
1490
1491                 print "- "+cpv+" (masked by: "+", ".join(mreasons)+")"
1492                 if comment and comment not in shown_comments:
1493                         print filename+":"
1494                         print comment
1495                         shown_comments.add(comment)
1496                 portdb = root_config.trees["porttree"].dbapi
1497                 for l in missing_licenses:
1498                         l_path = portdb.findLicensePath(l)
1499                         if l in shown_licenses:
1500                                 continue
1501                         msg = ("A copy of the '%s' license" + \
1502                         " is located at '%s'.") % (l, l_path)
1503                         print msg
1504                         print
1505                         shown_licenses.add(l)
1506         return have_eapi_mask
1507
1508 class Task(SlotObject):
1509         __slots__ = ("_hash_key", "_hash_value")
1510
1511         def _get_hash_key(self):
1512                 hash_key = getattr(self, "_hash_key", None)
1513                 if hash_key is None:
1514                         raise NotImplementedError(self)
1515                 return hash_key
1516
1517         def __eq__(self, other):
1518                 return self._get_hash_key() == other
1519
1520         def __ne__(self, other):
1521                 return self._get_hash_key() != other
1522
1523         def __hash__(self):
1524                 hash_value = getattr(self, "_hash_value", None)
1525                 if hash_value is None:
1526                         self._hash_value = hash(self._get_hash_key())
1527                 return self._hash_value
1528
1529         def __len__(self):
1530                 return len(self._get_hash_key())
1531
1532         def __getitem__(self, key):
1533                 return self._get_hash_key()[key]
1534
1535         def __iter__(self):
1536                 return iter(self._get_hash_key())
1537
1538         def __contains__(self, key):
1539                 return key in self._get_hash_key()
1540
1541         def __str__(self):
1542                 return str(self._get_hash_key())
1543
1544 class Blocker(Task):
1545
1546         __hash__ = Task.__hash__
1547         __slots__ = ("root", "atom", "cp", "eapi", "satisfied")
1548
1549         def __init__(self, **kwargs):
1550                 Task.__init__(self, **kwargs)
1551                 self.cp = portage.dep_getkey(self.atom)
1552
1553         def _get_hash_key(self):
1554                 hash_key = getattr(self, "_hash_key", None)
1555                 if hash_key is None:
1556                         self._hash_key = \
1557                                 ("blocks", self.root, self.atom, self.eapi)
1558                 return self._hash_key
1559
1560 class Package(Task):
1561
1562         __hash__ = Task.__hash__
1563         __slots__ = ("built", "cpv", "depth",
1564                 "installed", "metadata", "onlydeps", "operation",
1565                 "root_config", "type_name",
1566                 "category", "counter", "cp", "cpv_split",
1567                 "inherited", "iuse", "mtime",
1568                 "pf", "pv_split", "root", "slot", "slot_atom", "use")
1569
1570         metadata_keys = [
1571                 "CHOST", "COUNTER", "DEPEND", "EAPI",
1572                 "INHERITED", "IUSE", "KEYWORDS",
1573                 "LICENSE", "PDEPEND", "PROVIDE", "RDEPEND",
1574                 "repository", "PROPERTIES", "RESTRICT", "SLOT", "USE", "_mtime_"]
1575
1576         def __init__(self, **kwargs):
1577                 Task.__init__(self, **kwargs)
1578                 self.root = self.root_config.root
1579                 self.metadata = _PackageMetadataWrapper(self, self.metadata)
1580                 self.cp = portage.cpv_getkey(self.cpv)
1581                 slot = self.slot
1582                 if not slot:
1583                         # Avoid an InvalidAtom exception when creating slot_atom.
1584                         # This package instance will be masked due to empty SLOT.
1585                         slot = '0'
1586                 self.slot_atom = portage.dep.Atom("%s:%s" % (self.cp, slot))
1587                 self.category, self.pf = portage.catsplit(self.cpv)
1588                 self.cpv_split = portage.catpkgsplit(self.cpv)
1589                 self.pv_split = self.cpv_split[1:]
1590
1591         class _use(object):
1592
1593                 __slots__ = ("__weakref__", "enabled")
1594
1595                 def __init__(self, use):
1596                         self.enabled = frozenset(use)
1597
1598         class _iuse(object):
1599
1600                 __slots__ = ("__weakref__", "all", "enabled", "disabled", "iuse_implicit", "regex", "tokens")
1601
1602                 def __init__(self, tokens, iuse_implicit):
1603                         self.tokens = tuple(tokens)
1604                         self.iuse_implicit = iuse_implicit
1605                         enabled = []
1606                         disabled = []
1607                         other = []
1608                         for x in tokens:
1609                                 prefix = x[:1]
1610                                 if prefix == "+":
1611                                         enabled.append(x[1:])
1612                                 elif prefix == "-":
1613                                         disabled.append(x[1:])
1614                                 else:
1615                                         other.append(x)
1616                         self.enabled = frozenset(enabled)
1617                         self.disabled = frozenset(disabled)
1618                         self.all = frozenset(chain(enabled, disabled, other))
1619
1620                 def __getattribute__(self, name):
1621                         if name == "regex":
1622                                 try:
1623                                         return object.__getattribute__(self, "regex")
1624                                 except AttributeError:
1625                                         all = object.__getattribute__(self, "all")
1626                                         iuse_implicit = object.__getattribute__(self, "iuse_implicit")
1627                                         # Escape anything except ".*" which is supposed
1628                                         # to pass through from _get_implicit_iuse()
1629                                         regex = (re.escape(x) for x in chain(all, iuse_implicit))
1630                                         regex = "^(%s)$" % "|".join(regex)
1631                                         regex = regex.replace("\\.\\*", ".*")
1632                                         self.regex = re.compile(regex)
1633                         return object.__getattribute__(self, name)
1634
1635         def _get_hash_key(self):
1636                 hash_key = getattr(self, "_hash_key", None)
1637                 if hash_key is None:
1638                         if self.operation is None:
1639                                 self.operation = "merge"
1640                                 if self.onlydeps or self.installed:
1641                                         self.operation = "nomerge"
1642                         self._hash_key = \
1643                                 (self.type_name, self.root, self.cpv, self.operation)
1644                 return self._hash_key
1645
1646         def __lt__(self, other):
1647                 if other.cp != self.cp:
1648                         return False
1649                 if portage.pkgcmp(self.pv_split, other.pv_split) < 0:
1650                         return True
1651                 return False
1652
1653         def __le__(self, other):
1654                 if other.cp != self.cp:
1655                         return False
1656                 if portage.pkgcmp(self.pv_split, other.pv_split) <= 0:
1657                         return True
1658                 return False
1659
1660         def __gt__(self, other):
1661                 if other.cp != self.cp:
1662                         return False
1663                 if portage.pkgcmp(self.pv_split, other.pv_split) > 0:
1664                         return True
1665                 return False
1666
1667         def __ge__(self, other):
1668                 if other.cp != self.cp:
1669                         return False
1670                 if portage.pkgcmp(self.pv_split, other.pv_split) >= 0:
1671                         return True
1672                 return False
1673
1674 _all_metadata_keys = set(x for x in portage.auxdbkeys \
1675         if not x.startswith("UNUSED_"))
1676 _all_metadata_keys.discard("CDEPEND")
1677 _all_metadata_keys.update(Package.metadata_keys)
1678
1679 from portage.cache.mappings import slot_dict_class
1680 _PackageMetadataWrapperBase = slot_dict_class(_all_metadata_keys)
1681
1682 class _PackageMetadataWrapper(_PackageMetadataWrapperBase):
1683         """
1684         Detect metadata updates and synchronize Package attributes.
1685         """
1686
1687         __slots__ = ("_pkg",)
1688         _wrapped_keys = frozenset(
1689                 ["COUNTER", "INHERITED", "IUSE", "SLOT", "USE", "_mtime_"])
1690
1691         def __init__(self, pkg, metadata):
1692                 _PackageMetadataWrapperBase.__init__(self)
1693                 self._pkg = pkg
1694                 self.update(metadata)
1695
1696         def __setitem__(self, k, v):
1697                 _PackageMetadataWrapperBase.__setitem__(self, k, v)
1698                 if k in self._wrapped_keys:
1699                         getattr(self, "_set_" + k.lower())(k, v)
1700
1701         def _set_inherited(self, k, v):
1702                 if isinstance(v, basestring):
1703                         v = frozenset(v.split())
1704                 self._pkg.inherited = v
1705
1706         def _set_iuse(self, k, v):
1707                 self._pkg.iuse = self._pkg._iuse(
1708                         v.split(), self._pkg.root_config.iuse_implicit)
1709
1710         def _set_slot(self, k, v):
1711                 self._pkg.slot = v
1712
1713         def _set_use(self, k, v):
1714                 self._pkg.use = self._pkg._use(v.split())
1715
1716         def _set_counter(self, k, v):
1717                 if isinstance(v, basestring):
1718                         try:
1719                                 v = long(v.strip())
1720                         except ValueError:
1721                                 v = 0
1722                 self._pkg.counter = v
1723
1724         def _set__mtime_(self, k, v):
1725                 if isinstance(v, basestring):
1726                         try:
1727                                 v = long(v.strip())
1728                         except ValueError:
1729                                 v = 0
1730                 self._pkg.mtime = v
1731
1732 class EbuildFetchonly(SlotObject):
1733
1734         __slots__ = ("fetch_all", "pkg", "pretend", "settings")
1735
1736         def execute(self):
1737                 settings = self.settings
1738                 pkg = self.pkg
1739                 portdb = pkg.root_config.trees["porttree"].dbapi
1740                 ebuild_path = portdb.findname(pkg.cpv)
1741                 settings.setcpv(pkg)
1742                 debug = settings.get("PORTAGE_DEBUG") == "1"
1743                 use_cache = 1 # always true
1744                 portage.doebuild_environment(ebuild_path, "fetch",
1745                         settings["ROOT"], settings, debug, use_cache, portdb)
1746                 restrict_fetch = 'fetch' in settings['PORTAGE_RESTRICT'].split()
1747
1748                 if restrict_fetch:
1749                         rval = self._execute_with_builddir()
1750                 else:
1751                         rval = portage.doebuild(ebuild_path, "fetch",
1752                                 settings["ROOT"], settings, debug=debug,
1753                                 listonly=self.pretend, fetchonly=1, fetchall=self.fetch_all,
1754                                 mydbapi=portdb, tree="porttree")
1755
1756                         if rval != os.EX_OK:
1757                                 msg = "Fetch failed for '%s'" % (pkg.cpv,)
1758                                 eerror(msg, phase="unpack", key=pkg.cpv)
1759
1760                 return rval
1761
1762         def _execute_with_builddir(self):
1763                 # To spawn pkg_nofetch requires PORTAGE_BUILDDIR for
1764                 # ensuring sane $PWD (bug #239560) and storing elog
1765                 # messages. Use a private temp directory, in order
1766                 # to avoid locking the main one.
1767                 settings = self.settings
1768                 global_tmpdir = settings["PORTAGE_TMPDIR"]
1769                 from tempfile import mkdtemp
1770                 try:
1771                         private_tmpdir = mkdtemp("", "._portage_fetch_.", global_tmpdir)
1772                 except OSError, e:
1773                         if e.errno != portage.exception.PermissionDenied.errno:
1774                                 raise
1775                         raise portage.exception.PermissionDenied(global_tmpdir)
1776                 settings["PORTAGE_TMPDIR"] = private_tmpdir
1777                 settings.backup_changes("PORTAGE_TMPDIR")
1778                 try:
1779                         retval = self._execute()
1780                 finally:
1781                         settings["PORTAGE_TMPDIR"] = global_tmpdir
1782                         settings.backup_changes("PORTAGE_TMPDIR")
1783                         shutil.rmtree(private_tmpdir)
1784                 return retval
1785
1786         def _execute(self):
1787                 settings = self.settings
1788                 pkg = self.pkg
1789                 root_config = pkg.root_config
1790                 portdb = root_config.trees["porttree"].dbapi
1791                 ebuild_path = portdb.findname(pkg.cpv)
1792                 debug = settings.get("PORTAGE_DEBUG") == "1"
1793                 retval = portage.doebuild(ebuild_path, "fetch",
1794                         self.settings["ROOT"], self.settings, debug=debug,
1795                         listonly=self.pretend, fetchonly=1, fetchall=self.fetch_all,
1796                         mydbapi=portdb, tree="porttree")
1797
1798                 if retval != os.EX_OK:
1799                         msg = "Fetch failed for '%s'" % (pkg.cpv,)
1800                         eerror(msg, phase="unpack", key=pkg.cpv)
1801
1802                 portage.elog.elog_process(self.pkg.cpv, self.settings)
1803                 return retval
1804
1805 class PollConstants(object):
1806
1807         """
1808         Provides POLL* constants that are equivalent to those from the
1809         select module, for use by PollSelectAdapter.
1810         """
1811
1812         names = ("POLLIN", "POLLPRI", "POLLOUT", "POLLERR", "POLLHUP", "POLLNVAL")
1813         v = 1
1814         for k in names:
1815                 locals()[k] = getattr(select, k, v)
1816                 v *= 2
1817         del k, v
1818
1819 class AsynchronousTask(SlotObject):
1820         """
1821         Subclasses override _wait() and _poll() so that calls
1822         to public methods can be wrapped for implementing
1823         hooks such as exit listener notification.
1824
1825         Sublasses should call self.wait() to notify exit listeners after
1826         the task is complete and self.returncode has been set.
1827         """
1828
1829         __slots__ = ("background", "cancelled", "returncode") + \
1830                 ("_exit_listeners", "_exit_listener_stack", "_start_listeners")
1831
1832         def start(self):
1833                 """
1834                 Start an asynchronous task and then return as soon as possible.
1835                 """
1836                 self._start_hook()
1837                 self._start()
1838
1839         def _start(self):
1840                 raise NotImplementedError(self)
1841
1842         def isAlive(self):
1843                 return self.returncode is None
1844
1845         def poll(self):
1846                 self._wait_hook()
1847                 return self._poll()
1848
1849         def _poll(self):
1850                 return self.returncode
1851
1852         def wait(self):
1853                 if self.returncode is None:
1854                         self._wait()
1855                 self._wait_hook()
1856                 return self.returncode
1857
1858         def _wait(self):
1859                 return self.returncode
1860
1861         def cancel(self):
1862                 self.cancelled = True
1863                 self.wait()
1864
1865         def addStartListener(self, f):
1866                 """
1867                 The function will be called with one argument, a reference to self.
1868                 """
1869                 if self._start_listeners is None:
1870                         self._start_listeners = []
1871                 self._start_listeners.append(f)
1872
1873         def removeStartListener(self, f):
1874                 if self._start_listeners is None:
1875                         return
1876                 self._start_listeners.remove(f)
1877
1878         def _start_hook(self):
1879                 if self._start_listeners is not None:
1880                         start_listeners = self._start_listeners
1881                         self._start_listeners = None
1882
1883                         for f in start_listeners:
1884                                 f(self)
1885
1886         def addExitListener(self, f):
1887                 """
1888                 The function will be called with one argument, a reference to self.
1889                 """
1890                 if self._exit_listeners is None:
1891                         self._exit_listeners = []
1892                 self._exit_listeners.append(f)
1893
1894         def removeExitListener(self, f):
1895                 if self._exit_listeners is None:
1896                         if self._exit_listener_stack is not None:
1897                                 self._exit_listener_stack.remove(f)
1898                         return
1899                 self._exit_listeners.remove(f)
1900
1901         def _wait_hook(self):
1902                 """
1903                 Call this method after the task completes, just before returning
1904                 the returncode from wait() or poll(). This hook is
1905                 used to trigger exit listeners when the returncode first
1906                 becomes available.
1907                 """
1908                 if self.returncode is not None and \
1909                         self._exit_listeners is not None:
1910
1911                         # This prevents recursion, in case one of the
1912                         # exit handlers triggers this method again by
1913                         # calling wait(). Use a stack that gives
1914                         # removeExitListener() an opportunity to consume
1915                         # listeners from the stack, before they can get
1916                         # called below. This is necessary because a call
1917                         # to one exit listener may result in a call to
1918                         # removeExitListener() for another listener on
1919                         # the stack. That listener needs to be removed
1920                         # from the stack since it would be inconsistent
1921                         # to call it after it has been been passed into
1922                         # removeExitListener().
1923                         self._exit_listener_stack = self._exit_listeners
1924                         self._exit_listeners = None
1925
1926                         self._exit_listener_stack.reverse()
1927                         while self._exit_listener_stack:
1928                                 self._exit_listener_stack.pop()(self)
1929
1930 class AbstractPollTask(AsynchronousTask):
1931
1932         __slots__ = ("scheduler",) + \
1933                 ("_registered",)
1934
1935         _bufsize = 4096
1936         _exceptional_events = PollConstants.POLLERR | PollConstants.POLLNVAL
1937         _registered_events = PollConstants.POLLIN | PollConstants.POLLHUP | \
1938                 _exceptional_events
1939
1940         def _unregister(self):
1941                 raise NotImplementedError(self)
1942
1943         def _unregister_if_appropriate(self, event):
1944                 if self._registered:
1945                         if event & self._exceptional_events:
1946                                 self._unregister()
1947                                 self.cancel()
1948                         elif event & PollConstants.POLLHUP:
1949                                 self._unregister()
1950                                 self.wait()
1951
1952 class PipeReader(AbstractPollTask):
1953
1954         """
1955         Reads output from one or more files and saves it in memory,
1956         for retrieval via the getvalue() method. This is driven by
1957         the scheduler's poll() loop, so it runs entirely within the
1958         current process.
1959         """
1960
1961         __slots__ = ("input_files",) + \
1962                 ("_read_data", "_reg_ids")
1963
1964         def _start(self):
1965                 self._reg_ids = set()
1966                 self._read_data = []
1967                 for k, f in self.input_files.iteritems():
1968                         fcntl.fcntl(f.fileno(), fcntl.F_SETFL,
1969                                 fcntl.fcntl(f.fileno(), fcntl.F_GETFL) | os.O_NONBLOCK)
1970                         self._reg_ids.add(self.scheduler.register(f.fileno(),
1971                                 self._registered_events, self._output_handler))
1972                 self._registered = True
1973
1974         def isAlive(self):
1975                 return self._registered
1976
1977         def cancel(self):
1978                 if self.returncode is None:
1979                         self.returncode = 1
1980                         self.cancelled = True
1981                 self.wait()
1982
1983         def _wait(self):
1984                 if self.returncode is not None:
1985                         return self.returncode
1986
1987                 if self._registered:
1988                         self.scheduler.schedule(self._reg_ids)
1989                         self._unregister()
1990
1991                 self.returncode = os.EX_OK
1992                 return self.returncode
1993
1994         def getvalue(self):
1995                 """Retrieve the entire contents"""
1996                 if sys.hexversion >= 0x3000000:
1997                         return bytes().join(self._read_data)
1998                 return "".join(self._read_data)
1999
2000         def close(self):
2001                 """Free the memory buffer."""
2002                 self._read_data = None
2003
2004         def _output_handler(self, fd, event):
2005
2006                 if event & PollConstants.POLLIN:
2007
2008                         for f in self.input_files.itervalues():
2009                                 if fd == f.fileno():
2010                                         break
2011
2012                         buf = array.array('B')
2013                         try:
2014                                 buf.fromfile(f, self._bufsize)
2015                         except EOFError:
2016                                 pass
2017
2018                         if buf:
2019                                 self._read_data.append(buf.tostring())
2020                         else:
2021                                 self._unregister()
2022                                 self.wait()
2023
2024                 self._unregister_if_appropriate(event)
2025                 return self._registered
2026
2027         def _unregister(self):
2028                 """
2029                 Unregister from the scheduler and close open files.
2030                 """
2031
2032                 self._registered = False
2033
2034                 if self._reg_ids is not None:
2035                         for reg_id in self._reg_ids:
2036                                 self.scheduler.unregister(reg_id)
2037                         self._reg_ids = None
2038
2039                 if self.input_files is not None:
2040                         for f in self.input_files.itervalues():
2041                                 f.close()
2042                         self.input_files = None
2043
2044 class CompositeTask(AsynchronousTask):
2045
2046         __slots__ = ("scheduler",) + ("_current_task",)
2047
2048         def isAlive(self):
2049                 return self._current_task is not None
2050
2051         def cancel(self):
2052                 self.cancelled = True
2053                 if self._current_task is not None:
2054                         self._current_task.cancel()
2055
2056         def _poll(self):
2057                 """
2058                 This does a loop calling self._current_task.poll()
2059                 repeatedly as long as the value of self._current_task
2060                 keeps changing. It calls poll() a maximum of one time
2061                 for a given self._current_task instance. This is useful
2062                 since calling poll() on a task can trigger advance to
2063                 the next task could eventually lead to the returncode
2064                 being set in cases when polling only a single task would
2065                 not have the same effect.
2066                 """
2067
2068                 prev = None
2069                 while True:
2070                         task = self._current_task
2071                         if task is None or task is prev:
2072                                 # don't poll the same task more than once
2073                                 break
2074                         task.poll()
2075                         prev = task
2076
2077                 return self.returncode
2078
2079         def _wait(self):
2080
2081                 prev = None
2082                 while True:
2083                         task = self._current_task
2084                         if task is None:
2085                                 # don't wait for the same task more than once
2086                                 break
2087                         if task is prev:
2088                                 # Before the task.wait() method returned, an exit
2089                                 # listener should have set self._current_task to either
2090                                 # a different task or None. Something is wrong.
2091                                 raise AssertionError("self._current_task has not " + \
2092                                         "changed since calling wait", self, task)
2093                         task.wait()
2094                         prev = task
2095
2096                 return self.returncode
2097
2098         def _assert_current(self, task):
2099                 """
2100                 Raises an AssertionError if the given task is not the
2101                 same one as self._current_task. This can be useful
2102                 for detecting bugs.
2103                 """
2104                 if task is not self._current_task:
2105                         raise AssertionError("Unrecognized task: %s" % (task,))
2106
2107         def _default_exit(self, task):
2108                 """
2109                 Calls _assert_current() on the given task and then sets the
2110                 composite returncode attribute if task.returncode != os.EX_OK.
2111                 If the task failed then self._current_task will be set to None.
2112                 Subclasses can use this as a generic task exit callback.
2113
2114                 @rtype: int
2115                 @returns: The task.returncode attribute.
2116                 """
2117                 self._assert_current(task)
2118                 if task.returncode != os.EX_OK:
2119                         self.returncode = task.returncode
2120                         self._current_task = None
2121                 return task.returncode
2122
2123         def _final_exit(self, task):
2124                 """
2125                 Assumes that task is the final task of this composite task.
2126                 Calls _default_exit() and sets self.returncode to the task's
2127                 returncode and sets self._current_task to None.
2128                 """
2129                 self._default_exit(task)
2130                 self._current_task = None
2131                 self.returncode = task.returncode
2132                 return self.returncode
2133
2134         def _default_final_exit(self, task):
2135                 """
2136                 This calls _final_exit() and then wait().
2137
2138                 Subclasses can use this as a generic final task exit callback.
2139
2140                 """
2141                 self._final_exit(task)
2142                 return self.wait()
2143
2144         def _start_task(self, task, exit_handler):
2145                 """
2146                 Register exit handler for the given task, set it
2147                 as self._current_task, and call task.start().
2148
2149                 Subclasses can use this as a generic way to start
2150                 a task.
2151
2152                 """
2153                 task.addExitListener(exit_handler)
2154                 self._current_task = task
2155                 task.start()
2156
2157 class TaskSequence(CompositeTask):
2158         """
2159         A collection of tasks that executes sequentially. Each task
2160         must have a addExitListener() method that can be used as
2161         a means to trigger movement from one task to the next.
2162         """
2163
2164         __slots__ = ("_task_queue",)
2165
2166         def __init__(self, **kwargs):
2167                 AsynchronousTask.__init__(self, **kwargs)
2168                 self._task_queue = deque()
2169
2170         def add(self, task):
2171                 self._task_queue.append(task)
2172
2173         def _start(self):
2174                 self._start_next_task()
2175
2176         def cancel(self):
2177                 self._task_queue.clear()
2178                 CompositeTask.cancel(self)
2179
2180         def _start_next_task(self):
2181                 self._start_task(self._task_queue.popleft(),
2182                         self._task_exit_handler)
2183
2184         def _task_exit_handler(self, task):
2185                 if self._default_exit(task) != os.EX_OK:
2186                         self.wait()
2187                 elif self._task_queue:
2188                         self._start_next_task()
2189                 else:
2190                         self._final_exit(task)
2191                         self.wait()
2192
2193 class SubProcess(AbstractPollTask):
2194
2195         __slots__ = ("pid",) + \
2196                 ("_files", "_reg_id")
2197
2198         # A file descriptor is required for the scheduler to monitor changes from
2199         # inside a poll() loop. When logging is not enabled, create a pipe just to
2200         # serve this purpose alone.
2201         _dummy_pipe_fd = 9
2202
2203         def _poll(self):
2204                 if self.returncode is not None:
2205                         return self.returncode
2206                 if self.pid is None:
2207                         return self.returncode
2208                 if self._registered:
2209                         return self.returncode
2210
2211                 try:
2212                         retval = os.waitpid(self.pid, os.WNOHANG)
2213                 except OSError, e:
2214                         if e.errno != errno.ECHILD:
2215                                 raise
2216                         del e
2217                         retval = (self.pid, 1)
2218
2219                 if retval == (0, 0):
2220                         return None
2221                 self._set_returncode(retval)
2222                 return self.returncode
2223
2224         def cancel(self):
2225                 if self.isAlive():
2226                         try:
2227                                 os.kill(self.pid, signal.SIGTERM)
2228                         except OSError, e:
2229                                 if e.errno != errno.ESRCH:
2230                                         raise
2231                                 del e
2232
2233                 self.cancelled = True
2234                 if self.pid is not None:
2235                         self.wait()
2236                 return self.returncode
2237
2238         def isAlive(self):
2239                 return self.pid is not None and \
2240                         self.returncode is None
2241
2242         def _wait(self):
2243
2244                 if self.returncode is not None:
2245                         return self.returncode
2246
2247                 if self._registered:
2248                         self.scheduler.schedule(self._reg_id)
2249                         self._unregister()
2250                         if self.returncode is not None:
2251                                 return self.returncode
2252
2253                 try:
2254                         wait_retval = os.waitpid(self.pid, 0)
2255                 except OSError, e:
2256                         if e.errno != errno.ECHILD:
2257                                 raise
2258                         del e
2259                         self._set_returncode((self.pid, 1))
2260                 else:
2261                         self._set_returncode(wait_retval)
2262
2263                 return self.returncode
2264
2265         def _unregister(self):
2266                 """
2267                 Unregister from the scheduler and close open files.
2268                 """
2269
2270                 self._registered = False
2271
2272                 if self._reg_id is not None:
2273                         self.scheduler.unregister(self._reg_id)
2274                         self._reg_id = None
2275
2276                 if self._files is not None:
2277                         for f in self._files.itervalues():
2278                                 f.close()
2279                         self._files = None
2280
2281         def _set_returncode(self, wait_retval):
2282
2283                 retval = wait_retval[1]
2284
2285                 if retval != os.EX_OK:
2286                         if retval & 0xff:
2287                                 retval = (retval & 0xff) << 8
2288                         else:
2289                                 retval = retval >> 8
2290
2291                 self.returncode = retval
2292
2293 class SpawnProcess(SubProcess):
2294
2295         """
2296         Constructor keyword args are passed into portage.process.spawn().
2297         The required "args" keyword argument will be passed as the first
2298         spawn() argument.
2299         """
2300
2301         _spawn_kwarg_names = ("env", "opt_name", "fd_pipes",
2302                 "uid", "gid", "groups", "umask", "logfile",
2303                 "path_lookup", "pre_exec")
2304
2305         __slots__ = ("args",) + \
2306                 _spawn_kwarg_names
2307
2308         _file_names = ("log", "process", "stdout")
2309         _files_dict = slot_dict_class(_file_names, prefix="")
2310
2311         def _start(self):
2312
2313                 if self.cancelled:
2314                         return
2315
2316                 if self.fd_pipes is None:
2317                         self.fd_pipes = {}
2318                 fd_pipes = self.fd_pipes
2319                 fd_pipes.setdefault(0, sys.stdin.fileno())
2320                 fd_pipes.setdefault(1, sys.stdout.fileno())
2321                 fd_pipes.setdefault(2, sys.stderr.fileno())
2322
2323                 # flush any pending output
2324                 for fd in fd_pipes.itervalues():
2325                         if fd == sys.stdout.fileno():
2326                                 sys.stdout.flush()
2327                         if fd == sys.stderr.fileno():
2328                                 sys.stderr.flush()
2329
2330                 logfile = self.logfile
2331                 self._files = self._files_dict()
2332                 files = self._files
2333
2334                 master_fd, slave_fd = self._pipe(fd_pipes)
2335                 fcntl.fcntl(master_fd, fcntl.F_SETFL,
2336                         fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
2337
2338                 null_input = None
2339                 fd_pipes_orig = fd_pipes.copy()
2340                 if self.background:
2341                         # TODO: Use job control functions like tcsetpgrp() to control
2342                         # access to stdin. Until then, use /dev/null so that any
2343                         # attempts to read from stdin will immediately return EOF
2344                         # instead of blocking indefinitely.
2345                         null_input = open('/dev/null', 'rb')
2346                         fd_pipes[0] = null_input.fileno()
2347                 else:
2348                         fd_pipes[0] = fd_pipes_orig[0]
2349
2350                 files.process = os.fdopen(master_fd, 'rb')
2351                 if logfile is not None:
2352
2353                         fd_pipes[1] = slave_fd
2354                         fd_pipes[2] = slave_fd
2355
2356                         files.log = open(logfile, mode='ab')
2357                         portage.util.apply_secpass_permissions(logfile,
2358                                 uid=portage.portage_uid, gid=portage.portage_gid,
2359                                 mode=0660)
2360
2361                         if not self.background:
2362                                 files.stdout = os.fdopen(os.dup(fd_pipes_orig[1]), 'wb')
2363
2364                         output_handler = self._output_handler
2365
2366                 else:
2367
2368                         # Create a dummy pipe so the scheduler can monitor
2369                         # the process from inside a poll() loop.
2370                         fd_pipes[self._dummy_pipe_fd] = slave_fd
2371                         if self.background:
2372                                 fd_pipes[1] = slave_fd
2373                                 fd_pipes[2] = slave_fd
2374                         output_handler = self._dummy_handler
2375
2376                 kwargs = {}
2377                 for k in self._spawn_kwarg_names:
2378                         v = getattr(self, k)
2379                         if v is not None:
2380                                 kwargs[k] = v
2381
2382                 kwargs["fd_pipes"] = fd_pipes
2383                 kwargs["returnpid"] = True
2384                 kwargs.pop("logfile", None)
2385
2386                 self._reg_id = self.scheduler.register(files.process.fileno(),
2387                         self._registered_events, output_handler)
2388                 self._registered = True
2389
2390                 retval = self._spawn(self.args, **kwargs)
2391
2392                 os.close(slave_fd)
2393                 if null_input is not None:
2394                         null_input.close()
2395
2396                 if isinstance(retval, int):
2397                         # spawn failed
2398                         self._unregister()
2399                         self.returncode = retval
2400                         self.wait()
2401                         return
2402
2403                 self.pid = retval[0]
2404                 portage.process.spawned_pids.remove(self.pid)
2405
2406         def _pipe(self, fd_pipes):
2407                 """
2408                 @type fd_pipes: dict
2409                 @param fd_pipes: pipes from which to copy terminal size if desired.
2410                 """
2411                 return os.pipe()
2412
2413         def _spawn(self, args, **kwargs):
2414                 return portage.process.spawn(args, **kwargs)
2415
2416         def _output_handler(self, fd, event):
2417
2418                 if event & PollConstants.POLLIN:
2419
2420                         files = self._files
2421                         buf = array.array('B')
2422                         try:
2423                                 buf.fromfile(files.process, self._bufsize)
2424                         except EOFError:
2425                                 pass
2426
2427                         if buf:
2428                                 if not self.background:
2429                                         write_successful = False
2430                                         failures = 0
2431                                         while True:
2432                                                 try:
2433                                                         if not write_successful:
2434                                                                 buf.tofile(files.stdout)
2435                                                                 write_successful = True
2436                                                         files.stdout.flush()
2437                                                         break
2438                                                 except IOError, e:
2439                                                         if e.errno != errno.EAGAIN:
2440                                                                 raise
2441                                                         del e
2442                                                         failures += 1
2443                                                         if failures > 50:
2444                                                                 # Avoid a potentially infinite loop. In
2445                                                                 # most cases, the failure count is zero
2446                                                                 # and it's unlikely to exceed 1.
2447                                                                 raise
2448
2449                                                         # This means that a subprocess has put an inherited
2450                                                         # stdio file descriptor (typically stdin) into
2451                                                         # O_NONBLOCK mode. This is not acceptable (see bug
2452                                                         # #264435), so revert it. We need to use a loop
2453                                                         # here since there's a race condition due to
2454                                                         # parallel processes being able to change the
2455                                                         # flags on the inherited file descriptor.
2456                                                         # TODO: When possible, avoid having child processes
2457                                                         # inherit stdio file descriptors from portage
2458                                                         # (maybe it can't be avoided with
2459                                                         # PROPERTIES=interactive).
2460                                                         fcntl.fcntl(files.stdout.fileno(), fcntl.F_SETFL,
2461                                                                 fcntl.fcntl(files.stdout.fileno(),
2462                                                                 fcntl.F_GETFL) ^ os.O_NONBLOCK)
2463
2464                                 buf.tofile(files.log)
2465                                 files.log.flush()
2466                         else:
2467                                 self._unregister()
2468                                 self.wait()
2469
2470                 self._unregister_if_appropriate(event)
2471                 return self._registered
2472
2473         def _dummy_handler(self, fd, event):
2474                 """
2475                 This method is mainly interested in detecting EOF, since
2476                 the only purpose of the pipe is to allow the scheduler to
2477                 monitor the process from inside a poll() loop.
2478                 """
2479
2480                 if event & PollConstants.POLLIN:
2481
2482                         buf = array.array('B')
2483                         try:
2484                                 buf.fromfile(self._files.process, self._bufsize)
2485                         except EOFError:
2486                                 pass
2487
2488                         if buf:
2489                                 pass
2490                         else:
2491                                 self._unregister()
2492                                 self.wait()
2493
2494                 self._unregister_if_appropriate(event)
2495                 return self._registered
2496
2497 class MiscFunctionsProcess(SpawnProcess):
2498         """
2499         Spawns misc-functions.sh with an existing ebuild environment.
2500         """
2501
2502         __slots__ = ("commands", "phase", "pkg", "settings")
2503
2504         def _start(self):
2505                 settings = self.settings
2506                 settings.pop("EBUILD_PHASE", None)
2507                 portage_bin_path = settings["PORTAGE_BIN_PATH"]
2508                 misc_sh_binary = os.path.join(portage_bin_path,
2509                         os.path.basename(portage.const.MISC_SH_BINARY))
2510
2511                 self.args = [portage._shell_quote(misc_sh_binary)] + self.commands
2512                 self.logfile = settings.get("PORTAGE_LOG_FILE")
2513
2514                 portage._doebuild_exit_status_unlink(
2515                         settings.get("EBUILD_EXIT_STATUS_FILE"))
2516
2517                 SpawnProcess._start(self)
2518
2519         def _spawn(self, args, **kwargs):
2520                 settings = self.settings
2521                 debug = settings.get("PORTAGE_DEBUG") == "1"
2522                 return portage.spawn(" ".join(args), settings,
2523                         debug=debug, **kwargs)
2524
2525         def _set_returncode(self, wait_retval):
2526                 SpawnProcess._set_returncode(self, wait_retval)
2527                 self.returncode = portage._doebuild_exit_status_check_and_log(
2528                         self.settings, self.phase, self.returncode)
2529
2530 class EbuildFetcher(SpawnProcess):
2531
2532         __slots__ = ("config_pool", "fetchonly", "fetchall", "pkg", "prefetch") + \
2533                 ("_build_dir",)
2534
2535         def _start(self):
2536
2537                 root_config = self.pkg.root_config
2538                 portdb = root_config.trees["porttree"].dbapi
2539                 ebuild_path = portdb.findname(self.pkg.cpv)
2540                 settings = self.config_pool.allocate()
2541                 settings.setcpv(self.pkg)
2542
2543                 # In prefetch mode, logging goes to emerge-fetch.log and the builddir
2544                 # should not be touched since otherwise it could interfere with
2545                 # another instance of the same cpv concurrently being built for a
2546                 # different $ROOT (currently, builds only cooperate with prefetchers
2547                 # that are spawned for the same $ROOT).
2548                 if not self.prefetch:
2549                         self._build_dir = EbuildBuildDir(pkg=self.pkg, settings=settings)
2550                         self._build_dir.lock()
2551                         self._build_dir.clean_log()
2552                         portage.prepare_build_dirs(self.pkg.root, self._build_dir.settings, 0)
2553                         if self.logfile is None:
2554                                 self.logfile = settings.get("PORTAGE_LOG_FILE")
2555
2556                 phase = "fetch"
2557                 if self.fetchall:
2558                         phase = "fetchall"
2559
2560                 # If any incremental variables have been overridden
2561                 # via the environment, those values need to be passed
2562                 # along here so that they are correctly considered by
2563                 # the config instance in the subproccess.
2564                 fetch_env = os.environ.copy()
2565
2566                 nocolor = settings.get("NOCOLOR")
2567                 if nocolor is not None:
2568                         fetch_env["NOCOLOR"] = nocolor
2569
2570                 fetch_env["PORTAGE_NICENESS"] = "0"
2571                 if self.prefetch:
2572                         fetch_env["PORTAGE_PARALLEL_FETCHONLY"] = "1"
2573
2574                 ebuild_binary = os.path.join(
2575                         settings["PORTAGE_BIN_PATH"], "ebuild")
2576
2577                 fetch_args = [ebuild_binary, ebuild_path, phase]
2578                 debug = settings.get("PORTAGE_DEBUG") == "1"
2579                 if debug:
2580                         fetch_args.append("--debug")
2581
2582                 self.args = fetch_args
2583                 self.env = fetch_env
2584                 SpawnProcess._start(self)
2585
2586         def _pipe(self, fd_pipes):
2587                 """When appropriate, use a pty so that fetcher progress bars,
2588                 like wget has, will work properly."""
2589                 if self.background or not sys.stdout.isatty():
2590                         # When the output only goes to a log file,
2591                         # there's no point in creating a pty.
2592                         return os.pipe()
2593                 stdout_pipe = fd_pipes.get(1)
2594                 got_pty, master_fd, slave_fd = \
2595                         portage._create_pty_or_pipe(copy_term_size=stdout_pipe)
2596                 return (master_fd, slave_fd)
2597
2598         def _set_returncode(self, wait_retval):
2599                 SpawnProcess._set_returncode(self, wait_retval)
2600                 # Collect elog messages that might have been
2601                 # created by the pkg_nofetch phase.
2602                 if self._build_dir is not None:
2603                         # Skip elog messages for prefetch, in order to avoid duplicates.
2604                         if not self.prefetch and self.returncode != os.EX_OK:
2605                                 elog_out = None
2606                                 if self.logfile is not None:
2607                                         if self.background:
2608                                                 elog_out = open(self.logfile, 'a')
2609                                 msg = "Fetch failed for '%s'" % (self.pkg.cpv,)
2610                                 if self.logfile is not None:
2611                                         msg += ", Log file:"
2612                                 eerror(msg, phase="unpack", key=self.pkg.cpv, out=elog_out)
2613                                 if self.logfile is not None:
2614                                         eerror(" '%s'" % (self.logfile,),
2615                                                 phase="unpack", key=self.pkg.cpv, out=elog_out)
2616                                 if elog_out is not None:
2617                                         elog_out.close()
2618                         if not self.prefetch:
2619                                 portage.elog.elog_process(self.pkg.cpv, self._build_dir.settings)
2620                         features = self._build_dir.settings.features
2621                         if self.returncode == os.EX_OK:
2622                                 self._build_dir.clean_log()
2623                         self._build_dir.unlock()
2624                         self.config_pool.deallocate(self._build_dir.settings)
2625                         self._build_dir = None
2626
2627 class EbuildBuildDir(SlotObject):
2628
2629         __slots__ = ("dir_path", "pkg", "settings",
2630                 "locked", "_catdir", "_lock_obj")
2631
2632         def __init__(self, **kwargs):
2633                 SlotObject.__init__(self, **kwargs)
2634                 self.locked = False
2635
2636         def lock(self):
2637                 """
2638                 This raises an AlreadyLocked exception if lock() is called
2639                 while a lock is already held. In order to avoid this, call
2640                 unlock() or check whether the "locked" attribute is True
2641                 or False before calling lock().
2642                 """
2643                 if self._lock_obj is not None:
2644                         raise self.AlreadyLocked((self._lock_obj,))
2645
2646                 dir_path = self.dir_path
2647                 if dir_path is None:
2648                         root_config = self.pkg.root_config
2649                         portdb = root_config.trees["porttree"].dbapi
2650                         ebuild_path = portdb.findname(self.pkg.cpv)
2651                         settings = self.settings
2652                         settings.setcpv(self.pkg)
2653                         debug = settings.get("PORTAGE_DEBUG") == "1"
2654                         use_cache = 1 # always true
2655                         portage.doebuild_environment(ebuild_path, "setup", root_config.root,
2656                                 self.settings, debug, use_cache, portdb)
2657                         dir_path = self.settings["PORTAGE_BUILDDIR"]
2658
2659                 catdir = os.path.dirname(dir_path)
2660                 self._catdir = catdir
2661
2662                 portage.util.ensure_dirs(os.path.dirname(catdir),
2663                         gid=portage.portage_gid,
2664                         mode=070, mask=0)
2665                 catdir_lock = None
2666                 try:
2667                         catdir_lock = portage.locks.lockdir(catdir)
2668                         portage.util.ensure_dirs(catdir,
2669                                 gid=portage.portage_gid,
2670                                 mode=070, mask=0)
2671                         self._lock_obj = portage.locks.lockdir(dir_path)
2672                 finally:
2673                         self.locked = self._lock_obj is not None
2674                         if catdir_lock is not None:
2675                                 portage.locks.unlockdir(catdir_lock)
2676
2677         def clean_log(self):
2678                 """Discard existing log."""
2679                 settings = self.settings
2680
2681                 for x in ('.logid', 'temp/build.log'):
2682                         try:
2683                                 os.unlink(os.path.join(settings["PORTAGE_BUILDDIR"], x))
2684                         except OSError:
2685                                 pass
2686
2687         def unlock(self):
2688                 if self._lock_obj is None:
2689                         return
2690
2691                 portage.locks.unlockdir(self._lock_obj)
2692                 self._lock_obj = None
2693                 self.locked = False
2694
2695                 catdir = self._catdir
2696                 catdir_lock = None
2697                 try:
2698                         catdir_lock = portage.locks.lockdir(catdir)
2699                 finally:
2700                         if catdir_lock:
2701                                 try:
2702                                         os.rmdir(catdir)
2703                                 except OSError, e:
2704                                         if e.errno not in (errno.ENOENT,
2705                                                 errno.ENOTEMPTY, errno.EEXIST):
2706                                                 raise
2707                                         del e
2708                                 portage.locks.unlockdir(catdir_lock)
2709
2710         class AlreadyLocked(portage.exception.PortageException):
2711                 pass
2712
2713 class EbuildBuild(CompositeTask):
2714
2715         __slots__ = ("args_set", "config_pool", "find_blockers",
2716                 "ldpath_mtimes", "logger", "opts", "pkg", "pkg_count",
2717                 "prefetcher", "settings", "world_atom") + \
2718                 ("_build_dir", "_buildpkg", "_ebuild_path", "_issyspkg", "_tree")
2719
2720         def _start(self):
2721
2722                 logger = self.logger
2723                 opts = self.opts
2724                 pkg = self.pkg
2725                 settings = self.settings
2726                 world_atom = self.world_atom
2727                 root_config = pkg.root_config
2728                 tree = "porttree"
2729                 self._tree = tree
2730                 portdb = root_config.trees[tree].dbapi
2731                 settings.setcpv(pkg)
2732                 settings.configdict["pkg"]["EMERGE_FROM"] = pkg.type_name
2733                 ebuild_path = portdb.findname(self.pkg.cpv)
2734                 self._ebuild_path = ebuild_path
2735
2736                 prefetcher = self.prefetcher
2737                 if prefetcher is None:
2738                         pass
2739                 elif not prefetcher.isAlive():
2740                         prefetcher.cancel()
2741                 elif prefetcher.poll() is None:
2742
2743                         waiting_msg = "Fetching files " + \
2744                                 "in the background. " + \
2745                                 "To view fetch progress, run `tail -f " + \
2746                                 "/var/log/emerge-fetch.log` in another " + \
2747                                 "terminal."
2748                         msg_prefix = colorize("GOOD", " * ")
2749                         from textwrap import wrap
2750                         waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
2751                                 for line in wrap(waiting_msg, 65))
2752                         if not self.background:
2753                                 writemsg(waiting_msg, noiselevel=-1)
2754
2755                         self._current_task = prefetcher
2756                         prefetcher.addExitListener(self._prefetch_exit)
2757                         return
2758
2759                 self._prefetch_exit(prefetcher)
2760
2761         def _prefetch_exit(self, prefetcher):
2762
2763                 opts = self.opts
2764                 pkg = self.pkg
2765                 settings = self.settings
2766
2767                 if opts.fetchonly:
2768                                 fetcher = EbuildFetchonly(
2769                                         fetch_all=opts.fetch_all_uri,
2770                                         pkg=pkg, pretend=opts.pretend,
2771                                         settings=settings)
2772                                 retval = fetcher.execute()
2773                                 self.returncode = retval
2774                                 self.wait()
2775                                 return
2776
2777                 fetcher = EbuildFetcher(config_pool=self.config_pool,
2778                         fetchall=opts.fetch_all_uri,
2779                         fetchonly=opts.fetchonly,
2780                         background=self.background,
2781                         pkg=pkg, scheduler=self.scheduler)
2782
2783                 self._start_task(fetcher, self._fetch_exit)
2784
2785         def _fetch_exit(self, fetcher):
2786                 opts = self.opts
2787                 pkg = self.pkg
2788
2789                 fetch_failed = False
2790                 if opts.fetchonly:
2791                         fetch_failed = self._final_exit(fetcher) != os.EX_OK
2792                 else:
2793                         fetch_failed = self._default_exit(fetcher) != os.EX_OK
2794
2795                 if fetch_failed and fetcher.logfile is not None and \
2796                         os.path.exists(fetcher.logfile):
2797                         self.settings["PORTAGE_LOG_FILE"] = fetcher.logfile
2798
2799                 if not fetch_failed and fetcher.logfile is not None:
2800                         # Fetch was successful, so remove the fetch log.
2801                         try:
2802                                 os.unlink(fetcher.logfile)
2803                         except OSError:
2804                                 pass
2805
2806                 if fetch_failed or opts.fetchonly:
2807                         self.wait()
2808                         return
2809
2810                 logger = self.logger
2811                 opts = self.opts
2812                 pkg_count = self.pkg_count
2813                 scheduler = self.scheduler
2814                 settings = self.settings
2815                 features = settings.features
2816                 ebuild_path = self._ebuild_path
2817                 system_set = pkg.root_config.sets["system"]
2818
2819                 self._build_dir = EbuildBuildDir(pkg=pkg, settings=settings)
2820                 self._build_dir.lock()
2821
2822                 # Cleaning is triggered before the setup
2823                 # phase, in portage.doebuild().
2824                 msg = " === (%s of %s) Cleaning (%s::%s)" % \
2825                         (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2826                 short_msg = "emerge: (%s of %s) %s Clean" % \
2827                         (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2828                 logger.log(msg, short_msg=short_msg)
2829
2830                 #buildsyspkg: Check if we need to _force_ binary package creation
2831                 self._issyspkg = "buildsyspkg" in features and \
2832                                 system_set.findAtomForPackage(pkg) and \
2833                                 not opts.buildpkg
2834
2835                 if opts.buildpkg or self._issyspkg:
2836
2837                         self._buildpkg = True
2838
2839                         msg = " === (%s of %s) Compiling/Packaging (%s::%s)" % \
2840                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2841                         short_msg = "emerge: (%s of %s) %s Compile" % \
2842                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2843                         logger.log(msg, short_msg=short_msg)
2844
2845                 else:
2846                         msg = " === (%s of %s) Compiling/Merging (%s::%s)" % \
2847                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2848                         short_msg = "emerge: (%s of %s) %s Compile" % \
2849                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2850                         logger.log(msg, short_msg=short_msg)
2851
2852                 build = EbuildExecuter(background=self.background, pkg=pkg,
2853                         scheduler=scheduler, settings=settings)
2854                 self._start_task(build, self._build_exit)
2855
2856         def _unlock_builddir(self):
2857                 portage.elog.elog_process(self.pkg.cpv, self.settings)
2858                 self._build_dir.unlock()
2859
2860         def _build_exit(self, build):
2861                 if self._default_exit(build) != os.EX_OK:
2862                         self._unlock_builddir()
2863                         self.wait()
2864                         return
2865
2866                 opts = self.opts
2867                 buildpkg = self._buildpkg
2868
2869                 if not buildpkg:
2870                         self._final_exit(build)
2871                         self.wait()
2872                         return
2873
2874                 if self._issyspkg:
2875                         msg = ">>> This is a system package, " + \
2876                                 "let's pack a rescue tarball.\n"
2877
2878                         log_path = self.settings.get("PORTAGE_LOG_FILE")
2879                         if log_path is not None:
2880                                 log_file = open(log_path, 'a')
2881                                 try:
2882                                         log_file.write(msg)
2883                                 finally:
2884                                         log_file.close()
2885
2886                         if not self.background:
2887                                 portage.writemsg_stdout(msg, noiselevel=-1)
2888
2889                 packager = EbuildBinpkg(background=self.background, pkg=self.pkg,
2890                         scheduler=self.scheduler, settings=self.settings)
2891
2892                 self._start_task(packager, self._buildpkg_exit)
2893
2894         def _buildpkg_exit(self, packager):
2895                 """
2896                 Released build dir lock when there is a failure or
2897                 when in buildpkgonly mode. Otherwise, the lock will
2898                 be released when merge() is called.
2899                 """
2900
2901                 if self._default_exit(packager) != os.EX_OK:
2902                         self._unlock_builddir()
2903                         self.wait()
2904                         return
2905
2906                 if self.opts.buildpkgonly:
2907                         # Need to call "clean" phase for buildpkgonly mode
2908                         portage.elog.elog_process(self.pkg.cpv, self.settings)
2909                         phase = "clean"
2910                         clean_phase = EbuildPhase(background=self.background,
2911                                 pkg=self.pkg, phase=phase,
2912                                 scheduler=self.scheduler, settings=self.settings,
2913                                 tree=self._tree)
2914                         self._start_task(clean_phase, self._clean_exit)
2915                         return
2916
2917                 # Continue holding the builddir lock until
2918                 # after the package has been installed.
2919                 self._current_task = None
2920                 self.returncode = packager.returncode
2921                 self.wait()
2922
2923         def _clean_exit(self, clean_phase):
2924                 if self._final_exit(clean_phase) != os.EX_OK or \
2925                         self.opts.buildpkgonly:
2926                         self._unlock_builddir()
2927                 self.wait()
2928
2929         def install(self):
2930                 """
2931                 Install the package and then clean up and release locks.
2932                 Only call this after the build has completed successfully
2933                 and neither fetchonly nor buildpkgonly mode are enabled.
2934                 """
2935
2936                 find_blockers = self.find_blockers
2937                 ldpath_mtimes = self.ldpath_mtimes
2938                 logger = self.logger
2939                 pkg = self.pkg
2940                 pkg_count = self.pkg_count
2941                 settings = self.settings
2942                 world_atom = self.world_atom
2943                 ebuild_path = self._ebuild_path
2944                 tree = self._tree
2945
2946                 merge = EbuildMerge(find_blockers=self.find_blockers,
2947                         ldpath_mtimes=ldpath_mtimes, logger=logger, pkg=pkg,
2948                         pkg_count=pkg_count, pkg_path=ebuild_path,
2949                         scheduler=self.scheduler,
2950                         settings=settings, tree=tree, world_atom=world_atom)
2951
2952                 msg = " === (%s of %s) Merging (%s::%s)" % \
2953                         (pkg_count.curval, pkg_count.maxval,
2954                         pkg.cpv, ebuild_path)
2955                 short_msg = "emerge: (%s of %s) %s Merge" % \
2956                         (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2957                 logger.log(msg, short_msg=short_msg)
2958
2959                 try:
2960                         rval = merge.execute()
2961                 finally:
2962                         self._unlock_builddir()
2963
2964                 return rval
2965
2966 class EbuildExecuter(CompositeTask):
2967
2968         __slots__ = ("pkg", "scheduler", "settings") + ("_tree",)
2969
2970         _phases = ("prepare", "configure", "compile", "test", "install")
2971
2972         _live_eclasses = frozenset([
2973                 "bzr",
2974                 "cvs",
2975                 "darcs",
2976                 "git",
2977                 "mercurial",
2978                 "subversion"
2979         ])
2980
2981         def _start(self):
2982                 self._tree = "porttree"
2983                 pkg = self.pkg
2984                 phase = "clean"
2985                 clean_phase = EbuildPhase(background=self.background, pkg=pkg, phase=phase,
2986                         scheduler=self.scheduler, settings=self.settings, tree=self._tree)
2987                 self._start_task(clean_phase, self._clean_phase_exit)
2988
2989         def _clean_phase_exit(self, clean_phase):
2990
2991                 if self._default_exit(clean_phase) != os.EX_OK:
2992                         self.wait()
2993                         return
2994
2995                 pkg = self.pkg
2996                 scheduler = self.scheduler
2997                 settings = self.settings
2998                 cleanup = 1
2999
3000                 # This initializes PORTAGE_LOG_FILE.
3001                 portage.prepare_build_dirs(pkg.root, settings, cleanup)
3002
3003                 setup_phase = EbuildPhase(background=self.background,
3004                         pkg=pkg, phase="setup", scheduler=scheduler,
3005                         settings=settings, tree=self._tree)
3006
3007                 setup_phase.addExitListener(self._setup_exit)
3008                 self._current_task = setup_phase
3009                 self.scheduler.scheduleSetup(setup_phase)
3010
3011         def _setup_exit(self, setup_phase):
3012
3013                 if self._default_exit(setup_phase) != os.EX_OK:
3014                         self.wait()
3015                         return
3016
3017                 unpack_phase = EbuildPhase(background=self.background,
3018                         pkg=self.pkg, phase="unpack", scheduler=self.scheduler,
3019                         settings=self.settings, tree=self._tree)
3020
3021                 if self._live_eclasses.intersection(self.pkg.inherited):
3022                         # Serialize $DISTDIR access for live ebuilds since
3023                         # otherwise they can interfere with eachother.
3024
3025                         unpack_phase.addExitListener(self._unpack_exit)
3026                         self._current_task = unpack_phase
3027                         self.scheduler.scheduleUnpack(unpack_phase)
3028
3029                 else:
3030                         self._start_task(unpack_phase, self._unpack_exit)
3031
3032         def _unpack_exit(self, unpack_phase):
3033
3034                 if self._default_exit(unpack_phase) != os.EX_OK:
3035                         self.wait()
3036                         return
3037
3038                 ebuild_phases = TaskSequence(scheduler=self.scheduler)
3039
3040                 pkg = self.pkg
3041                 phases = self._phases
3042                 eapi = pkg.metadata["EAPI"]
3043                 if eapi in ("0", "1"):
3044                         # skip src_prepare and src_configure
3045                         phases = phases[2:]
3046
3047                 for phase in phases:
3048                         ebuild_phases.add(EbuildPhase(background=self.background,
3049                                 pkg=self.pkg, phase=phase, scheduler=self.scheduler,
3050                                 settings=self.settings, tree=self._tree))
3051
3052                 self._start_task(ebuild_phases, self._default_final_exit)
3053
3054 class EbuildMetadataPhase(SubProcess):
3055
3056         """
3057         Asynchronous interface for the ebuild "depend" phase which is
3058         used to extract metadata from the ebuild.
3059         """
3060
3061         __slots__ = ("cpv", "ebuild_path", "fd_pipes", "metadata_callback",
3062                 "ebuild_mtime", "metadata", "portdb", "repo_path", "settings") + \
3063                 ("_raw_metadata",)
3064
3065         _file_names = ("ebuild",)
3066         _files_dict = slot_dict_class(_file_names, prefix="")
3067         _metadata_fd = 9
3068
3069         def _start(self):
3070                 settings = self.settings
3071                 settings.setcpv(self.cpv)
3072                 ebuild_path = self.ebuild_path
3073
3074                 eapi = None
3075                 if 'parse-eapi-glep-55' in settings.features:
3076                         pf, eapi = portage._split_ebuild_name_glep55(
3077                                 os.path.basename(ebuild_path))
3078                 if eapi is None and \
3079                         'parse-eapi-ebuild-head' in settings.features:
3080                         eapi = portage._parse_eapi_ebuild_head(codecs.open(ebuild_path,
3081                                 mode='r', encoding='utf_8', errors='replace'))
3082
3083                 if eapi is not None:
3084                         if not portage.eapi_is_supported(eapi):
3085                                 self.metadata_callback(self.cpv, self.ebuild_path,
3086                                         self.repo_path, {'EAPI' : eapi}, self.ebuild_mtime)
3087                                 self.returncode = os.EX_OK
3088                                 self.wait()
3089                                 return
3090
3091                         settings.configdict['pkg']['EAPI'] = eapi
3092
3093                 debug = settings.get("PORTAGE_DEBUG") == "1"
3094                 master_fd = None
3095                 slave_fd = None
3096                 fd_pipes = None
3097                 if self.fd_pipes is not None:
3098                         fd_pipes = self.fd_pipes.copy()
3099                 else:
3100                         fd_pipes = {}
3101
3102                 fd_pipes.setdefault(0, sys.stdin.fileno())
3103                 fd_pipes.setdefault(1, sys.stdout.fileno())
3104                 fd_pipes.setdefault(2, sys.stderr.fileno())
3105
3106                 # flush any pending output
3107                 for fd in fd_pipes.itervalues():
3108                         if fd == sys.stdout.fileno():
3109                                 sys.stdout.flush()
3110                         if fd == sys.stderr.fileno():
3111                                 sys.stderr.flush()
3112
3113                 fd_pipes_orig = fd_pipes.copy()
3114                 self._files = self._files_dict()
3115                 files = self._files
3116
3117                 master_fd, slave_fd = os.pipe()
3118                 fcntl.fcntl(master_fd, fcntl.F_SETFL,
3119                         fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
3120
3121                 fd_pipes[self._metadata_fd] = slave_fd
3122
3123                 self._raw_metadata = []
3124                 files.ebuild = os.fdopen(master_fd, 'r')
3125                 self._reg_id = self.scheduler.register(files.ebuild.fileno(),
3126                         self._registered_events, self._output_handler)
3127                 self._registered = True
3128
3129                 retval = portage.doebuild(ebuild_path, "depend",
3130                         settings["ROOT"], settings, debug,
3131                         mydbapi=self.portdb, tree="porttree",
3132                         fd_pipes=fd_pipes, returnpid=True)
3133
3134                 os.close(slave_fd)
3135
3136                 if isinstance(retval, int):
3137                         # doebuild failed before spawning
3138                         self._unregister()
3139                         self.returncode = retval
3140                         self.wait()
3141                         return
3142
3143                 self.pid = retval[0]
3144                 portage.process.spawned_pids.remove(self.pid)
3145
3146         def _output_handler(self, fd, event):
3147
3148                 if event & PollConstants.POLLIN:
3149                         self._raw_metadata.append(self._files.ebuild.read())
3150                         if not self._raw_metadata[-1]:
3151                                 self._unregister()
3152                                 self.wait()
3153
3154                 self._unregister_if_appropriate(event)
3155                 return self._registered
3156
3157         def _set_returncode(self, wait_retval):
3158                 SubProcess._set_returncode(self, wait_retval)
3159                 if self.returncode == os.EX_OK:
3160                         metadata_lines = "".join(self._raw_metadata).splitlines()
3161                         if len(portage.auxdbkeys) != len(metadata_lines):
3162                                 # Don't trust bash's returncode if the
3163                                 # number of lines is incorrect.
3164                                 self.returncode = 1
3165                         else:
3166                                 metadata = izip(portage.auxdbkeys, metadata_lines)
3167                                 self.metadata = self.metadata_callback(self.cpv,
3168                                         self.ebuild_path, self.repo_path, metadata,
3169                                         self.ebuild_mtime)
3170
3171 class EbuildProcess(SpawnProcess):
3172
3173         __slots__ = ("phase", "pkg", "settings", "tree")
3174
3175         def _start(self):
3176                 # Don't open the log file during the clean phase since the
3177                 # open file can result in an nfs lock on $T/build.log which
3178                 # prevents the clean phase from removing $T.
3179                 if self.phase not in ("clean", "cleanrm"):
3180                         self.logfile = self.settings.get("PORTAGE_LOG_FILE")
3181                 SpawnProcess._start(self)
3182
3183         def _pipe(self, fd_pipes):
3184                 stdout_pipe = fd_pipes.get(1)
3185                 got_pty, master_fd, slave_fd = \
3186                         portage._create_pty_or_pipe(copy_term_size=stdout_pipe)
3187                 return (master_fd, slave_fd)
3188
3189         def _spawn(self, args, **kwargs):
3190
3191                 root_config = self.pkg.root_config
3192                 tree = self.tree
3193                 mydbapi = root_config.trees[tree].dbapi
3194                 settings = self.settings
3195                 ebuild_path = settings["EBUILD"]
3196                 debug = settings.get("PORTAGE_DEBUG") == "1"
3197
3198                 rval = portage.doebuild(ebuild_path, self.phase,
3199                         root_config.root, settings, debug,
3200                         mydbapi=mydbapi, tree=tree, **kwargs)
3201
3202                 return rval
3203
3204         def _set_returncode(self, wait_retval):
3205                 SpawnProcess._set_returncode(self, wait_retval)
3206
3207                 if self.phase not in ("clean", "cleanrm"):
3208                         self.returncode = portage._doebuild_exit_status_check_and_log(
3209                                 self.settings, self.phase, self.returncode)
3210
3211                 if self.phase == "test" and self.returncode != os.EX_OK and \
3212                         "test-fail-continue" in self.settings.features:
3213                         self.returncode = os.EX_OK
3214
3215                 portage._post_phase_userpriv_perms(self.settings)
3216
3217 class EbuildPhase(CompositeTask):
3218
3219         __slots__ = ("background", "pkg", "phase",
3220                 "scheduler", "settings", "tree")
3221
3222         _post_phase_cmds = portage._post_phase_cmds
3223
3224         def _start(self):
3225
3226                 ebuild_process = EbuildProcess(background=self.background,
3227                         pkg=self.pkg, phase=self.phase, scheduler=self.scheduler,
3228                         settings=self.settings, tree=self.tree)
3229
3230                 self._start_task(ebuild_process, self._ebuild_exit)
3231
3232         def _ebuild_exit(self, ebuild_process):
3233
3234                 if self.phase == "install":
3235                         out = None
3236                         log_path = self.settings.get("PORTAGE_LOG_FILE")
3237                         log_file = None
3238                         if self.background and log_path is not None:
3239                                 log_file = open(log_path, 'a')
3240                                 out = log_file
3241                         try:
3242                                 portage._check_build_log(self.settings, out=out)
3243                         finally:
3244                                 if log_file is not None:
3245                                         log_file.close()
3246
3247                 if self._default_exit(ebuild_process) != os.EX_OK:
3248                         self.wait()
3249                         return
3250
3251                 settings = self.settings
3252
3253                 if self.phase == "install":
3254                         portage._post_src_install_chost_fix(settings)
3255                         portage._post_src_install_uid_fix(settings)
3256
3257                 post_phase_cmds = self._post_phase_cmds.get(self.phase)
3258                 if post_phase_cmds is not None:
3259                         post_phase = MiscFunctionsProcess(background=self.background,
3260                                 commands=post_phase_cmds, phase=self.phase, pkg=self.pkg,
3261                                 scheduler=self.scheduler, settings=settings)
3262                         self._start_task(post_phase, self._post_phase_exit)
3263                         return
3264
3265                 self.returncode = ebuild_process.returncode
3266                 self._current_task = None
3267                 self.wait()
3268
3269         def _post_phase_exit(self, post_phase):
3270                 if self._final_exit(post_phase) != os.EX_OK:
3271                         writemsg("!!! post %s failed; exiting.\n" % self.phase,
3272                                 noiselevel=-1)
3273                 self._current_task = None
3274                 self.wait()
3275                 return
3276
3277 class EbuildBinpkg(EbuildProcess):
3278         """
3279         This assumes that src_install() has successfully completed.
3280         """
3281         __slots__ = ("_binpkg_tmpfile",)
3282
3283         def _start(self):
3284                 self.phase = "package"
3285                 self.tree = "porttree"
3286                 pkg = self.pkg
3287                 root_config = pkg.root_config
3288                 portdb = root_config.trees["porttree"].dbapi
3289                 bintree = root_config.trees["bintree"]
3290                 ebuild_path = portdb.findname(self.pkg.cpv)
3291                 settings = self.settings
3292                 debug = settings.get("PORTAGE_DEBUG") == "1"
3293
3294                 bintree.prevent_collision(pkg.cpv)
3295                 binpkg_tmpfile = os.path.join(bintree.pkgdir,
3296                         pkg.cpv + ".tbz2." + str(os.getpid()))
3297                 self._binpkg_tmpfile = binpkg_tmpfile
3298                 settings["PORTAGE_BINPKG_TMPFILE"] = binpkg_tmpfile
3299                 settings.backup_changes("PORTAGE_BINPKG_TMPFILE")
3300
3301                 try:
3302                         EbuildProcess._start(self)
3303                 finally:
3304                         settings.pop("PORTAGE_BINPKG_TMPFILE", None)
3305
3306         def _set_returncode(self, wait_retval):
3307                 EbuildProcess._set_returncode(self, wait_retval)
3308
3309                 pkg = self.pkg
3310                 bintree = pkg.root_config.trees["bintree"]
3311                 binpkg_tmpfile = self._binpkg_tmpfile
3312                 if self.returncode == os.EX_OK:
3313                         bintree.inject(pkg.cpv, filename=binpkg_tmpfile)
3314
3315 class EbuildMerge(SlotObject):
3316
3317         __slots__ = ("find_blockers", "logger", "ldpath_mtimes",
3318                 "pkg", "pkg_count", "pkg_path", "pretend",
3319                 "scheduler", "settings", "tree", "world_atom")
3320
3321         def execute(self):
3322                 root_config = self.pkg.root_config
3323                 settings = self.settings
3324                 retval = portage.merge(settings["CATEGORY"],
3325                         settings["PF"], settings["D"],
3326                         os.path.join(settings["PORTAGE_BUILDDIR"],
3327                         "build-info"), root_config.root, settings,
3328                         myebuild=settings["EBUILD"],
3329                         mytree=self.tree, mydbapi=root_config.trees[self.tree].dbapi,
3330                         vartree=root_config.trees["vartree"],
3331                         prev_mtimes=self.ldpath_mtimes,
3332                         scheduler=self.scheduler,
3333                         blockers=self.find_blockers)
3334
3335                 if retval == os.EX_OK:
3336                         self.world_atom(self.pkg)
3337                         self._log_success()
3338
3339                 return retval
3340
3341         def _log_success(self):
3342                 pkg = self.pkg
3343                 pkg_count = self.pkg_count
3344                 pkg_path = self.pkg_path
3345                 logger = self.logger
3346                 if "noclean" not in self.settings.features:
3347                         short_msg = "emerge: (%s of %s) %s Clean Post" % \
3348                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3349                         logger.log((" === (%s of %s) " + \
3350                                 "Post-Build Cleaning (%s::%s)") % \
3351                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path),
3352                                 short_msg=short_msg)
3353                 logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
3354                         (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
3355
3356 class PackageUninstall(AsynchronousTask):
3357
3358         __slots__ = ("ldpath_mtimes", "opts", "pkg", "scheduler", "settings")
3359
3360         def _start(self):
3361                 try:
3362                         unmerge(self.pkg.root_config, self.opts, "unmerge",
3363                                 [self.pkg.cpv], self.ldpath_mtimes, clean_world=0,
3364                                 clean_delay=0, raise_on_error=1, scheduler=self.scheduler,
3365                                 writemsg_level=self._writemsg_level)
3366                 except UninstallFailure, e:
3367                         self.returncode = e.status
3368                 else:
3369                         self.returncode = os.EX_OK
3370                 self.wait()
3371
3372         def _writemsg_level(self, msg, level=0, noiselevel=0):
3373
3374                 log_path = self.settings.get("PORTAGE_LOG_FILE")
3375                 background = self.background
3376
3377                 if log_path is None:
3378                         if not (background and level < logging.WARNING):
3379                                 portage.util.writemsg_level(msg,
3380                                         level=level, noiselevel=noiselevel)
3381                 else:
3382                         if not background:
3383                                 portage.util.writemsg_level(msg,
3384                                         level=level, noiselevel=noiselevel)
3385
3386                         f = open(log_path, 'a')
3387                         try:
3388                                 f.write(msg)
3389                         finally:
3390                                 f.close()
3391
3392 class Binpkg(CompositeTask):
3393
3394         __slots__ = ("find_blockers",
3395                 "ldpath_mtimes", "logger", "opts",
3396                 "pkg", "pkg_count", "prefetcher", "settings", "world_atom") + \
3397                 ("_bintree", "_build_dir", "_ebuild_path", "_fetched_pkg",
3398                 "_image_dir", "_infloc", "_pkg_path", "_tree", "_verify")
3399
3400         def _writemsg_level(self, msg, level=0, noiselevel=0):
3401
3402                 if not self.background:
3403                         portage.util.writemsg_level(msg,
3404                                 level=level, noiselevel=noiselevel)
3405
3406                 log_path = self.settings.get("PORTAGE_LOG_FILE")
3407                 if  log_path is not None:
3408                         f = open(log_path, 'a')
3409                         try:
3410                                 f.write(msg)
3411                         finally:
3412                                 f.close()
3413
3414         def _start(self):
3415
3416                 pkg = self.pkg
3417                 settings = self.settings
3418                 settings.setcpv(pkg)
3419                 self._tree = "bintree"
3420                 self._bintree = self.pkg.root_config.trees[self._tree]
3421                 self._verify = not self.opts.pretend
3422
3423                 dir_path = os.path.join(settings["PORTAGE_TMPDIR"],
3424                         "portage", pkg.category, pkg.pf)
3425                 self._build_dir = EbuildBuildDir(dir_path=dir_path,
3426                         pkg=pkg, settings=settings)
3427                 self._image_dir = os.path.join(dir_path, "image")
3428                 self._infloc = os.path.join(dir_path, "build-info")
3429                 self._ebuild_path = os.path.join(self._infloc, pkg.pf + ".ebuild")
3430                 settings["EBUILD"] = self._ebuild_path
3431                 debug = settings.get("PORTAGE_DEBUG") == "1"
3432                 portage.doebuild_environment(self._ebuild_path, "setup",
3433                         settings["ROOT"], settings, debug, 1, self._bintree.dbapi)
3434                 settings.configdict["pkg"]["EMERGE_FROM"] = pkg.type_name
3435
3436                 # The prefetcher has already completed or it
3437                 # could be running now. If it's running now,
3438                 # wait for it to complete since it holds
3439                 # a lock on the file being fetched. The
3440                 # portage.locks functions are only designed
3441                 # to work between separate processes. Since
3442                 # the lock is held by the current process,
3443                 # use the scheduler and fetcher methods to
3444                 # synchronize with the fetcher.
3445                 prefetcher = self.prefetcher
3446                 if prefetcher is None:
3447                         pass
3448                 elif not prefetcher.isAlive():
3449                         prefetcher.cancel()
3450                 elif prefetcher.poll() is None:
3451
3452                         waiting_msg = ("Fetching '%s' " + \
3453                                 "in the background. " + \
3454                                 "To view fetch progress, run `tail -f " + \
3455                                 "/var/log/emerge-fetch.log` in another " + \
3456                                 "terminal.") % prefetcher.pkg_path
3457                         msg_prefix = colorize("GOOD", " * ")
3458                         from textwrap import wrap
3459                         waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
3460                                 for line in wrap(waiting_msg, 65))
3461                         if not self.background:
3462                                 writemsg(waiting_msg, noiselevel=-1)
3463
3464                         self._current_task = prefetcher
3465                         prefetcher.addExitListener(self._prefetch_exit)
3466                         return
3467
3468                 self._prefetch_exit(prefetcher)
3469
3470         def _prefetch_exit(self, prefetcher):
3471
3472                 pkg = self.pkg
3473                 pkg_count = self.pkg_count
3474                 if not (self.opts.pretend or self.opts.fetchonly):
3475                         self._build_dir.lock()
3476                         # If necessary, discard old log so that we don't
3477                         # append to it.
3478                         self._build_dir.clean_log()
3479                         # Initialze PORTAGE_LOG_FILE.
3480                         portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
3481                 fetcher = BinpkgFetcher(background=self.background,
3482                         logfile=self.settings.get("PORTAGE_LOG_FILE"), pkg=self.pkg,
3483                         pretend=self.opts.pretend, scheduler=self.scheduler)
3484                 pkg_path = fetcher.pkg_path
3485                 self._pkg_path = pkg_path
3486
3487                 if self.opts.getbinpkg and self._bintree.isremote(pkg.cpv):
3488
3489                         msg = " --- (%s of %s) Fetching Binary (%s::%s)" %\
3490                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
3491                         short_msg = "emerge: (%s of %s) %s Fetch" % \
3492                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3493                         self.logger.log(msg, short_msg=short_msg)
3494                         self._start_task(fetcher, self._fetcher_exit)
3495                         return
3496
3497                 self._fetcher_exit(fetcher)
3498
3499         def _fetcher_exit(self, fetcher):
3500
3501                 # The fetcher only has a returncode when
3502                 # --getbinpkg is enabled.
3503                 if fetcher.returncode is not None:
3504                         self._fetched_pkg = True
3505                         if self._default_exit(fetcher) != os.EX_OK:
3506                                 self._unlock_builddir()
3507                                 self.wait()
3508                                 return
3509
3510                 if self.opts.pretend:
3511                         self._current_task = None
3512                         self.returncode = os.EX_OK
3513                         self.wait()
3514                         return
3515
3516                 verifier = None
3517                 if self._verify:
3518                         logfile = None
3519                         if self.background:
3520                                 logfile = self.settings.get("PORTAGE_LOG_FILE")
3521                         verifier = BinpkgVerifier(background=self.background,
3522                                 logfile=logfile, pkg=self.pkg)
3523                         self._start_task(verifier, self._verifier_exit)
3524                         return
3525
3526                 self._verifier_exit(verifier)
3527
3528         def _verifier_exit(self, verifier):
3529                 if verifier is not None and \
3530                         self._default_exit(verifier) != os.EX_OK:
3531                         self._unlock_builddir()
3532                         self.wait()
3533                         return
3534
3535                 logger = self.logger
3536                 pkg = self.pkg
3537                 pkg_count = self.pkg_count
3538                 pkg_path = self._pkg_path
3539
3540                 if self._fetched_pkg:
3541                         self._bintree.inject(pkg.cpv, filename=pkg_path)
3542
3543                 if self.opts.fetchonly:
3544                         self._current_task = None
3545                         self.returncode = os.EX_OK
3546                         self.wait()
3547                         return
3548
3549                 msg = " === (%s of %s) Merging Binary (%s::%s)" % \
3550                         (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
3551                 short_msg = "emerge: (%s of %s) %s Merge Binary" % \
3552                         (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3553                 logger.log(msg, short_msg=short_msg)
3554
3555                 phase = "clean"
3556                 settings = self.settings
3557                 ebuild_phase = EbuildPhase(background=self.background,
3558                         pkg=pkg, phase=phase, scheduler=self.scheduler,
3559                         settings=settings, tree=self._tree)
3560
3561                 self._start_task(ebuild_phase, self._clean_exit)
3562
3563         def _clean_exit(self, clean_phase):
3564                 if self._default_exit(clean_phase) != os.EX_OK:
3565                         self._unlock_builddir()
3566                         self.wait()
3567                         return
3568
3569                 dir_path = self._build_dir.dir_path
3570
3571                 infloc = self._infloc
3572                 pkg = self.pkg
3573                 pkg_path = self._pkg_path
3574
3575                 dir_mode = 0755
3576                 for mydir in (dir_path, self._image_dir, infloc):
3577                         portage.util.ensure_dirs(mydir, uid=portage.data.portage_uid,
3578                                 gid=portage.data.portage_gid, mode=dir_mode)
3579
3580                 # This initializes PORTAGE_LOG_FILE.
3581                 portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
3582                 self._writemsg_level(">>> Extracting info\n")
3583
3584                 pkg_xpak = portage.xpak.tbz2(self._pkg_path)
3585                 check_missing_metadata = ("CATEGORY", "PF")
3586                 missing_metadata = set()
3587                 for k in check_missing_metadata:
3588                         v = pkg_xpak.getfile(k)
3589                         if not v:
3590                                 missing_metadata.add(k)
3591
3592                 pkg_xpak.unpackinfo(infloc)
3593                 for k in missing_metadata:
3594                         if k == "CATEGORY":
3595                                 v = pkg.category
3596                         elif k == "PF":
3597                                 v = pkg.pf
3598                         else:
3599                                 continue
3600
3601                         f = open(os.path.join(infloc, k), 'wb')
3602                         try:
3603                                 f.write(v + "\n")
3604                         finally:
3605                                 f.close()
3606
3607                 # Store the md5sum in the vdb.
3608                 f = open(os.path.join(infloc, "BINPKGMD5"), "w")
3609                 try:
3610                         f.write(str(portage.checksum.perform_md5(pkg_path)) + "\n")
3611                 finally:
3612                         f.close()
3613
3614                 # This gives bashrc users an opportunity to do various things
3615                 # such as remove binary packages after they're installed.
3616                 settings = self.settings
3617                 settings.setcpv(self.pkg)
3618                 settings["PORTAGE_BINPKG_FILE"] = pkg_path
3619                 settings.backup_changes("PORTAGE_BINPKG_FILE")
3620
3621                 phase = "setup"
3622                 setup_phase = EbuildPhase(background=self.background,
3623                         pkg=self.pkg, phase=phase, scheduler=self.scheduler,
3624                         settings=settings, tree=self._tree)
3625
3626                 setup_phase.addExitListener(self._setup_exit)
3627                 self._current_task = setup_phase
3628                 self.scheduler.scheduleSetup(setup_phase)
3629
3630         def _setup_exit(self, setup_phase):
3631                 if self._default_exit(setup_phase) != os.EX_OK:
3632                         self._unlock_builddir()
3633                         self.wait()
3634                         return
3635
3636                 extractor = BinpkgExtractorAsync(background=self.background,
3637                         image_dir=self._image_dir,
3638                         pkg=self.pkg, pkg_path=self._pkg_path, scheduler=self.scheduler)
3639                 self._writemsg_level(">>> Extracting %s\n" % self.pkg.cpv)
3640                 self._start_task(extractor, self._extractor_exit)
3641
3642         def _extractor_exit(self, extractor):
3643                 if self._final_exit(extractor) != os.EX_OK:
3644                         self._unlock_builddir()
3645                         writemsg("!!! Error Extracting '%s'\n" % self._pkg_path,
3646                                 noiselevel=-1)
3647                 self.wait()
3648
3649         def _unlock_builddir(self):
3650                 if self.opts.pretend or self.opts.fetchonly:
3651                         return
3652                 portage.elog.elog_process(self.pkg.cpv, self.settings)
3653                 self._build_dir.unlock()
3654
3655         def install(self):
3656
3657                 # This gives bashrc users an opportunity to do various things
3658                 # such as remove binary packages after they're installed.
3659                 settings = self.settings
3660                 settings["PORTAGE_BINPKG_FILE"] = self._pkg_path
3661                 settings.backup_changes("PORTAGE_BINPKG_FILE")
3662
3663                 merge = EbuildMerge(find_blockers=self.find_blockers,
3664                         ldpath_mtimes=self.ldpath_mtimes, logger=self.logger,
3665                         pkg=self.pkg, pkg_count=self.pkg_count,
3666                         pkg_path=self._pkg_path, scheduler=self.scheduler,
3667                         settings=settings, tree=self._tree, world_atom=self.world_atom)
3668
3669                 try:
3670                         retval = merge.execute()
3671                 finally:
3672                         settings.pop("PORTAGE_BINPKG_FILE", None)
3673                         self._unlock_builddir()
3674                 return retval
3675
3676 class BinpkgFetcher(SpawnProcess):
3677
3678         __slots__ = ("pkg", "pretend",
3679                 "locked", "pkg_path", "_lock_obj")
3680
3681         def __init__(self, **kwargs):
3682                 SpawnProcess.__init__(self, **kwargs)
3683                 pkg = self.pkg
3684                 self.pkg_path = pkg.root_config.trees["bintree"].getname(pkg.cpv)
3685
3686         def _start(self):
3687
3688                 if self.cancelled:
3689                         return
3690
3691                 pkg = self.pkg
3692                 pretend = self.pretend
3693                 bintree = pkg.root_config.trees["bintree"]
3694                 settings = bintree.settings
3695                 use_locks = "distlocks" in settings.features
3696                 pkg_path = self.pkg_path
3697
3698                 if not pretend:
3699                         portage.util.ensure_dirs(os.path.dirname(pkg_path))
3700                         if use_locks:
3701                                 self.lock()
3702                 exists = os.path.exists(pkg_path)
3703                 resume = exists and os.path.basename(pkg_path) in bintree.invalids
3704                 if not (pretend or resume):
3705                         # Remove existing file or broken symlink.
3706                         try:
3707                                 os.unlink(pkg_path)
3708                         except OSError:
3709                                 pass
3710
3711                 # urljoin doesn't work correctly with
3712                 # unrecognized protocols like sftp
3713                 if bintree._remote_has_index:
3714                         rel_uri = bintree._remotepkgs[pkg.cpv].get("PATH")
3715                         if not rel_uri:
3716                                 rel_uri = pkg.cpv + ".tbz2"
3717                         uri = bintree._remote_base_uri.rstrip("/") + \
3718                                 "/" + rel_uri.lstrip("/")
3719                 else:
3720                         uri = settings["PORTAGE_BINHOST"].rstrip("/") + \
3721                                 "/" + pkg.pf + ".tbz2"
3722
3723                 if pretend:
3724                         portage.writemsg_stdout("\n%s\n" % uri, noiselevel=-1)
3725                         self.returncode = os.EX_OK
3726                         self.wait()
3727                         return
3728
3729                 protocol = urlparse.urlparse(uri)[0]
3730                 fcmd_prefix = "FETCHCOMMAND"
3731                 if resume:
3732                         fcmd_prefix = "RESUMECOMMAND"
3733                 fcmd = settings.get(fcmd_prefix + "_" + protocol.upper())
3734                 if not fcmd:
3735                         fcmd = settings.get(fcmd_prefix)
3736
3737                 fcmd_vars = {
3738                         "DISTDIR" : os.path.dirname(pkg_path),
3739                         "URI"     : uri,
3740                         "FILE"    : os.path.basename(pkg_path)
3741                 }
3742
3743                 fetch_env = dict(settings.iteritems())
3744                 fetch_args = [portage.util.varexpand(x, mydict=fcmd_vars) \
3745                         for x in shlex.split(fcmd)]
3746
3747                 if self.fd_pipes is None:
3748                         self.fd_pipes = {}
3749                 fd_pipes = self.fd_pipes
3750
3751                 # Redirect all output to stdout since some fetchers like
3752                 # wget pollute stderr (if portage detects a problem then it
3753                 # can send it's own message to stderr).
3754                 fd_pipes.setdefault(0, sys.stdin.fileno())
3755                 fd_pipes.setdefault(1, sys.stdout.fileno())
3756                 fd_pipes.setdefault(2, sys.stdout.fileno())
3757
3758                 self.args = fetch_args
3759                 self.env = fetch_env
3760                 SpawnProcess._start(self)
3761
3762         def _set_returncode(self, wait_retval):
3763                 SpawnProcess._set_returncode(self, wait_retval)
3764                 if self.returncode == os.EX_OK:
3765                         # If possible, update the mtime to match the remote package if
3766                         # the fetcher didn't already do it automatically.
3767                         bintree = self.pkg.root_config.trees["bintree"]
3768                         if bintree._remote_has_index:
3769                                 remote_mtime = bintree._remotepkgs[self.pkg.cpv].get("MTIME")
3770                                 if remote_mtime is not None:
3771                                         try:
3772                                                 remote_mtime = long(remote_mtime)
3773                                         except ValueError:
3774                                                 pass
3775                                         else:
3776                                                 try:
3777                                                         local_mtime = long(os.stat(self.pkg_path).st_mtime)
3778                                                 except OSError:
3779                                                         pass
3780                                                 else:
3781                                                         if remote_mtime != local_mtime:
3782                                                                 try:
3783                                                                         os.utime(self.pkg_path,
3784                                                                                 (remote_mtime, remote_mtime))
3785                                                                 except OSError:
3786                                                                         pass
3787
3788                 if self.locked:
3789                         self.unlock()
3790
3791         def lock(self):
3792                 """
3793                 This raises an AlreadyLocked exception if lock() is called
3794                 while a lock is already held. In order to avoid this, call
3795                 unlock() or check whether the "locked" attribute is True
3796                 or False before calling lock().
3797                 """
3798                 if self._lock_obj is not None:
3799                         raise self.AlreadyLocked((self._lock_obj,))
3800
3801                 self._lock_obj = portage.locks.lockfile(
3802                         self.pkg_path, wantnewlockfile=1)
3803                 self.locked = True
3804
3805         class AlreadyLocked(portage.exception.PortageException):
3806                 pass
3807
3808         def unlock(self):
3809                 if self._lock_obj is None:
3810                         return
3811                 portage.locks.unlockfile(self._lock_obj)
3812                 self._lock_obj = None
3813                 self.locked = False
3814
3815 class BinpkgVerifier(AsynchronousTask):
3816         __slots__ = ("logfile", "pkg",)
3817
3818         def _start(self):
3819                 """
3820                 Note: Unlike a normal AsynchronousTask.start() method,
3821                 this one does all work is synchronously. The returncode
3822                 attribute will be set before it returns.
3823                 """
3824
3825                 pkg = self.pkg
3826                 root_config = pkg.root_config
3827                 bintree = root_config.trees["bintree"]
3828                 rval = os.EX_OK
3829                 stdout_orig = sys.stdout
3830                 stderr_orig = sys.stderr
3831                 log_file = None
3832                 if self.background and self.logfile is not None:
3833                         log_file = open(self.logfile, 'a')
3834                 try:
3835                         if log_file is not None:
3836                                 sys.stdout = log_file
3837                                 sys.stderr = log_file
3838                         try:
3839                                 bintree.digestCheck(pkg)
3840                         except portage.exception.FileNotFound:
3841                                 writemsg("!!! Fetching Binary failed " + \
3842                                         "for '%s'\n" % pkg.cpv, noiselevel=-1)
3843                                 rval = 1
3844                         except portage.exception.DigestException, e:
3845                                 writemsg("\n!!! Digest verification failed:\n",
3846                                         noiselevel=-1)
3847                                 writemsg("!!! %s\n" % e.value[0],
3848                                         noiselevel=-1)
3849                                 writemsg("!!! Reason: %s\n" % e.value[1],
3850                                         noiselevel=-1)
3851                                 writemsg("!!! Got: %s\n" % e.value[2],
3852                                         noiselevel=-1)
3853                                 writemsg("!!! Expected: %s\n" % e.value[3],
3854                                         noiselevel=-1)
3855                                 rval = 1
3856                         if rval != os.EX_OK:
3857                                 pkg_path = bintree.getname(pkg.cpv)
3858                                 head, tail = os.path.split(pkg_path)
3859                                 temp_filename = portage._checksum_failure_temp_file(head, tail)
3860                                 writemsg("File renamed to '%s'\n" % (temp_filename,),
3861                                         noiselevel=-1)
3862                 finally:
3863                         sys.stdout = stdout_orig
3864                         sys.stderr = stderr_orig
3865                         if log_file is not None:
3866                                 log_file.close()
3867
3868                 self.returncode = rval
3869                 self.wait()
3870
3871 class BinpkgPrefetcher(CompositeTask):
3872
3873         __slots__ = ("pkg",) + \
3874                 ("pkg_path", "_bintree",)
3875
3876         def _start(self):
3877                 self._bintree = self.pkg.root_config.trees["bintree"]
3878                 fetcher = BinpkgFetcher(background=self.background,
3879                         logfile=self.scheduler.fetch.log_file, pkg=self.pkg,
3880                         scheduler=self.scheduler)
3881                 self.pkg_path = fetcher.pkg_path
3882                 self._start_task(fetcher, self._fetcher_exit)
3883
3884         def _fetcher_exit(self, fetcher):
3885
3886                 if self._default_exit(fetcher) != os.EX_OK:
3887                         self.wait()
3888                         return
3889
3890                 verifier = BinpkgVerifier(background=self.background,
3891                         logfile=self.scheduler.fetch.log_file, pkg=self.pkg)
3892                 self._start_task(verifier, self._verifier_exit)
3893
3894         def _verifier_exit(self, verifier):
3895                 if self._default_exit(verifier) != os.EX_OK:
3896                         self.wait()
3897                         return
3898
3899                 self._bintree.inject(self.pkg.cpv, filename=self.pkg_path)
3900
3901                 self._current_task = None
3902                 self.returncode = os.EX_OK
3903                 self.wait()
3904
3905 class BinpkgExtractorAsync(SpawnProcess):
3906
3907         __slots__ = ("image_dir", "pkg", "pkg_path")
3908
3909         _shell_binary = portage.const.BASH_BINARY
3910
3911         def _start(self):
3912                 self.args = [self._shell_binary, "-c",
3913                         "bzip2 -dqc -- %s | tar -xp -C %s -f -" % \
3914                         (portage._shell_quote(self.pkg_path),
3915                         portage._shell_quote(self.image_dir))]
3916
3917                 self.env = self.pkg.root_config.settings.environ()
3918                 SpawnProcess._start(self)
3919
3920 class MergeListItem(CompositeTask):
3921
3922         """
3923         TODO: For parallel scheduling, everything here needs asynchronous
3924         execution support (start, poll, and wait methods).
3925         """
3926
3927         __slots__ = ("args_set",
3928                 "binpkg_opts", "build_opts", "config_pool", "emerge_opts",
3929                 "find_blockers", "logger", "mtimedb", "pkg",
3930                 "pkg_count", "pkg_to_replace", "prefetcher",
3931                 "settings", "statusMessage", "world_atom") + \
3932                 ("_install_task",)
3933
3934         def _start(self):
3935
3936                 pkg = self.pkg
3937                 build_opts = self.build_opts
3938
3939                 if pkg.installed:
3940                         # uninstall,  executed by self.merge()
3941                         self.returncode = os.EX_OK
3942                         self.wait()
3943                         return
3944
3945                 args_set = self.args_set
3946                 find_blockers = self.find_blockers
3947                 logger = self.logger
3948                 mtimedb = self.mtimedb
3949                 pkg_count = self.pkg_count
3950                 scheduler = self.scheduler
3951                 settings = self.settings
3952                 world_atom = self.world_atom
3953                 ldpath_mtimes = mtimedb["ldpath"]
3954
3955                 action_desc = "Emerging"
3956                 preposition = "for"
3957                 if pkg.type_name == "binary":
3958                         action_desc += " binary"
3959
3960                 if build_opts.fetchonly:
3961                         action_desc = "Fetching"
3962
3963                 msg = "%s (%s of %s) %s" % \
3964                         (action_desc,
3965                         colorize("MERGE_LIST_PROGRESS", str(pkg_count.curval)),
3966                         colorize("MERGE_LIST_PROGRESS", str(pkg_count.maxval)),
3967                         colorize("GOOD", pkg.cpv))
3968
3969                 portdb = pkg.root_config.trees["porttree"].dbapi
3970                 portdir_repo_name = portdb._repository_map.get(portdb.porttree_root)
3971                 if portdir_repo_name:
3972                         pkg_repo_name = pkg.metadata.get("repository")
3973                         if pkg_repo_name != portdir_repo_name:
3974                                 if not pkg_repo_name:
3975                                         pkg_repo_name = "unknown repo"
3976                                 msg += " from %s" % pkg_repo_name
3977
3978                 if pkg.root != "/":
3979                         msg += " %s %s" % (preposition, pkg.root)
3980
3981                 if not build_opts.pretend:
3982                         self.statusMessage(msg)
3983                         logger.log(" >>> emerge (%s of %s) %s to %s" % \
3984                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
3985
3986                 if pkg.type_name == "ebuild":
3987
3988                         build = EbuildBuild(args_set=args_set,
3989                                 background=self.background,
3990                                 config_pool=self.config_pool,
3991                                 find_blockers=find_blockers,
3992                                 ldpath_mtimes=ldpath_mtimes, logger=logger,
3993                                 opts=build_opts, pkg=pkg, pkg_count=pkg_count,
3994                                 prefetcher=self.prefetcher, scheduler=scheduler,
3995                                 settings=settings, world_atom=world_atom)
3996
3997                         self._install_task = build
3998                         self._start_task(build, self._default_final_exit)
3999                         return
4000
4001                 elif pkg.type_name == "binary":
4002
4003                         binpkg = Binpkg(background=self.background,
4004                                 find_blockers=find_blockers,
4005                                 ldpath_mtimes=ldpath_mtimes, logger=logger,
4006                                 opts=self.binpkg_opts, pkg=pkg, pkg_count=pkg_count,
4007                                 prefetcher=self.prefetcher, settings=settings,
4008                                 scheduler=scheduler, world_atom=world_atom)
4009
4010                         self._install_task = binpkg
4011                         self._start_task(binpkg, self._default_final_exit)
4012                         return
4013
4014         def _poll(self):
4015                 self._install_task.poll()
4016                 return self.returncode
4017
4018         def _wait(self):
4019                 self._install_task.wait()
4020                 return self.returncode
4021
4022         def merge(self):
4023
4024                 pkg = self.pkg
4025                 build_opts = self.build_opts
4026                 find_blockers = self.find_blockers
4027                 logger = self.logger
4028                 mtimedb = self.mtimedb
4029                 pkg_count = self.pkg_count
4030                 prefetcher = self.prefetcher
4031                 scheduler = self.scheduler
4032                 settings = self.settings
4033                 world_atom = self.world_atom
4034                 ldpath_mtimes = mtimedb["ldpath"]
4035
4036                 if pkg.installed:
4037                         if not (build_opts.buildpkgonly or \
4038                                 build_opts.fetchonly or build_opts.pretend):
4039
4040                                 uninstall = PackageUninstall(background=self.background,
4041                                         ldpath_mtimes=ldpath_mtimes, opts=self.emerge_opts,
4042                                         pkg=pkg, scheduler=scheduler, settings=settings)
4043
4044                                 uninstall.start()
4045                                 retval = uninstall.wait()
4046                                 if retval != os.EX_OK:
4047                                         return retval
4048                         return os.EX_OK
4049
4050                 if build_opts.fetchonly or \
4051                         build_opts.buildpkgonly:
4052                         return self.returncode
4053
4054                 retval = self._install_task.install()
4055                 return retval
4056
4057 class PackageMerge(AsynchronousTask):
4058         """
4059         TODO: Implement asynchronous merge so that the scheduler can
4060         run while a merge is executing.
4061         """
4062
4063         __slots__ = ("merge",)
4064
4065         def _start(self):
4066
4067                 pkg = self.merge.pkg
4068                 pkg_count = self.merge.pkg_count
4069
4070                 if pkg.installed:
4071                         action_desc = "Uninstalling"
4072                         preposition = "from"
4073                         counter_str = ""
4074                 else:
4075                         action_desc = "Installing"
4076                         preposition = "to"
4077                         counter_str = "(%s of %s) " % \
4078                                 (colorize("MERGE_LIST_PROGRESS", str(pkg_count.curval)),
4079                                 colorize("MERGE_LIST_PROGRESS", str(pkg_count.maxval)))
4080
4081                 msg = "%s %s%s" % \
4082                         (action_desc,
4083                         counter_str,
4084                         colorize("GOOD", pkg.cpv))
4085
4086                 if pkg.root != "/":
4087                         msg += " %s %s" % (preposition, pkg.root)
4088
4089                 if not self.merge.build_opts.fetchonly and \
4090                         not self.merge.build_opts.pretend and \
4091                         not self.merge.build_opts.buildpkgonly:
4092                         self.merge.statusMessage(msg)
4093
4094                 self.returncode = self.merge.merge()
4095                 self.wait()
4096
4097 class DependencyArg(object):
4098         def __init__(self, arg=None, root_config=None):
4099                 self.arg = arg
4100                 self.root_config = root_config
4101
4102         def __str__(self):
4103                 return str(self.arg)
4104
4105 class AtomArg(DependencyArg):
4106         def __init__(self, atom=None, **kwargs):
4107                 DependencyArg.__init__(self, **kwargs)
4108                 self.atom = atom
4109                 if not isinstance(self.atom, portage.dep.Atom):
4110                         self.atom = portage.dep.Atom(self.atom)
4111                 self.set = (self.atom, )
4112
4113 class PackageArg(DependencyArg):
4114         def __init__(self, package=None, **kwargs):
4115                 DependencyArg.__init__(self, **kwargs)
4116                 self.package = package
4117                 self.atom = portage.dep.Atom("=" + package.cpv)
4118                 self.set = (self.atom, )
4119
4120 class SetArg(DependencyArg):
4121         def __init__(self, set=None, **kwargs):
4122                 DependencyArg.__init__(self, **kwargs)
4123                 self.set = set
4124                 self.name = self.arg[len(SETPREFIX):]
4125
4126 class Dependency(SlotObject):
4127         __slots__ = ("atom", "blocker", "depth",
4128                 "parent", "onlydeps", "priority", "root")
4129         def __init__(self, **kwargs):
4130                 SlotObject.__init__(self, **kwargs)
4131                 if self.priority is None:
4132                         self.priority = DepPriority()
4133                 if self.depth is None:
4134                         self.depth = 0
4135
4136 class BlockerCache(portage.cache.mappings.MutableMapping):
4137         """This caches blockers of installed packages so that dep_check does not
4138         have to be done for every single installed package on every invocation of
4139         emerge.  The cache is invalidated whenever it is detected that something
4140         has changed that might alter the results of dep_check() calls:
4141                 1) the set of installed packages (including COUNTER) has changed
4142                 2) the old-style virtuals have changed
4143         """
4144
4145         # Number of uncached packages to trigger cache update, since
4146         # it's wasteful to update it for every vdb change.
4147         _cache_threshold = 5
4148
4149         class BlockerData(object):
4150
4151                 __slots__ = ("__weakref__", "atoms", "counter")
4152
4153                 def __init__(self, counter, atoms):
4154                         self.counter = counter
4155                         self.atoms = atoms
4156
4157         def __init__(self, myroot, vardb):
4158                 self._vardb = vardb
4159                 self._virtuals = vardb.settings.getvirtuals()
4160                 self._cache_filename = os.path.join(myroot,
4161                         portage.CACHE_PATH.lstrip(os.path.sep), "vdb_blockers.pickle")
4162                 self._cache_version = "1"
4163                 self._cache_data = None
4164                 self._modified = set()
4165                 self._load()
4166
4167         def _load(self):
4168                 try:
4169                         f = open(self._cache_filename, mode='rb')
4170                         mypickle = pickle.Unpickler(f)
4171                         try:
4172                                 mypickle.find_global = None
4173                         except AttributeError:
4174                                 # TODO: If py3k, override Unpickler.find_class().
4175                                 pass
4176                         self._cache_data = mypickle.load()
4177                         f.close()
4178                         del f
4179                 except (IOError, OSError, EOFError, ValueError, pickle.UnpicklingError), e:
4180                         if isinstance(e, pickle.UnpicklingError):
4181                                 writemsg("!!! Error loading '%s': %s\n" % \
4182                                         (self._cache_filename, str(e)), noiselevel=-1)
4183                         del e
4184
4185                 cache_valid = self._cache_data and \
4186                         isinstance(self._cache_data, dict) and \
4187                         self._cache_data.get("version") == self._cache_version and \
4188                         isinstance(self._cache_data.get("blockers"), dict)
4189                 if cache_valid:
4190                         # Validate all the atoms and counters so that
4191                         # corruption is detected as soon as possible.
4192                         invalid_items = set()
4193                         for k, v in self._cache_data["blockers"].iteritems():
4194                                 if not isinstance(k, basestring):
4195                                         invalid_items.add(k)
4196                                         continue
4197                                 try:
4198                                         if portage.catpkgsplit(k) is None:
4199                                                 invalid_items.add(k)
4200                                                 continue
4201                                 except portage.exception.InvalidData:
4202                                         invalid_items.add(k)
4203                                         continue
4204                                 if not isinstance(v, tuple) or \
4205                                         len(v) != 2:
4206                                         invalid_items.add(k)
4207                                         continue
4208                                 counter, atoms = v
4209                                 if not isinstance(counter, (int, long)):
4210                                         invalid_items.add(k)
4211                                         continue
4212                                 if not isinstance(atoms, (list, tuple)):
4213                                         invalid_items.add(k)
4214                                         continue
4215                                 invalid_atom = False
4216                                 for atom in atoms:
4217                                         if not isinstance(atom, basestring):
4218                                                 invalid_atom = True
4219                                                 break
4220                                         if atom[:1] != "!" or \
4221                                                 not portage.isvalidatom(
4222                                                 atom, allow_blockers=True):
4223                                                 invalid_atom = True
4224                                                 break
4225                                 if invalid_atom:
4226                                         invalid_items.add(k)
4227                                         continue
4228
4229                         for k in invalid_items:
4230                                 del self._cache_data["blockers"][k]
4231                         if not self._cache_data["blockers"]:
4232                                 cache_valid = False
4233
4234                 if not cache_valid:
4235                         self._cache_data = {"version":self._cache_version}
4236                         self._cache_data["blockers"] = {}
4237                         self._cache_data["virtuals"] = self._virtuals
4238                 self._modified.clear()
4239
4240         def flush(self):
4241                 """If the current user has permission and the internal blocker cache
4242                 been updated, save it to disk and mark it unmodified.  This is called
4243                 by emerge after it has proccessed blockers for all installed packages.
4244                 Currently, the cache is only written if the user has superuser
4245                 privileges (since that's required to obtain a lock), but all users
4246                 have read access and benefit from faster blocker lookups (as long as
4247                 the entire cache is still valid).  The cache is stored as a pickled
4248                 dict object with the following format:
4249
4250                 {
4251                         version : "1",
4252                         "blockers" : {cpv1:(counter,(atom1, atom2...)), cpv2...},
4253                         "virtuals" : vardb.settings.getvirtuals()
4254                 }
4255                 """
4256                 if len(self._modified) >= self._cache_threshold and \
4257                         secpass >= 2:
4258                         try:
4259                                 f = portage.util.atomic_ofstream(self._cache_filename, mode='wb')
4260                                 pickle.dump(self._cache_data, f, protocol=2)
4261                                 f.close()
4262                                 portage.util.apply_secpass_permissions(
4263                                         self._cache_filename, gid=portage.portage_gid, mode=0644)
4264                         except (IOError, OSError), e:
4265                                 pass
4266                         self._modified.clear()
4267
4268         def __setitem__(self, cpv, blocker_data):
4269                 """
4270                 Update the cache and mark it as modified for a future call to
4271                 self.flush().
4272
4273                 @param cpv: Package for which to cache blockers.
4274                 @type cpv: String
4275                 @param blocker_data: An object with counter and atoms attributes.
4276                 @type blocker_data: BlockerData
4277                 """
4278                 self._cache_data["blockers"][cpv] = \
4279                         (blocker_data.counter, tuple(str(x) for x in blocker_data.atoms))
4280                 self._modified.add(cpv)
4281
4282         def __iter__(self):
4283                 if self._cache_data is None:
4284                         # triggered by python-trace
4285                         return iter([])
4286                 return iter(self._cache_data["blockers"])
4287
4288         def __delitem__(self, cpv):
4289                 del self._cache_data["blockers"][cpv]
4290
4291         def __getitem__(self, cpv):
4292                 """
4293                 @rtype: BlockerData
4294                 @returns: An object with counter and atoms attributes.
4295                 """
4296                 return self.BlockerData(*self._cache_data["blockers"][cpv])
4297
4298 class BlockerDB(object):
4299
4300         def __init__(self, root_config):
4301                 self._root_config = root_config
4302                 self._vartree = root_config.trees["vartree"]
4303                 self._portdb = root_config.trees["porttree"].dbapi
4304
4305                 self._dep_check_trees = None
4306                 self._fake_vartree = None
4307
4308         def _get_fake_vartree(self, acquire_lock=0):
4309                 fake_vartree = self._fake_vartree
4310                 if fake_vartree is None:
4311                         fake_vartree = FakeVartree(self._root_config,
4312                                 acquire_lock=acquire_lock)
4313                         self._fake_vartree = fake_vartree
4314                         self._dep_check_trees = { self._vartree.root : {
4315                                 "porttree"    :  fake_vartree,
4316                                 "vartree"     :  fake_vartree,
4317                         }}
4318                 else:
4319                         fake_vartree.sync(acquire_lock=acquire_lock)
4320                 return fake_vartree
4321
4322         def findInstalledBlockers(self, new_pkg, acquire_lock=0):
4323                 blocker_cache = BlockerCache(self._vartree.root, self._vartree.dbapi)
4324                 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
4325                 settings = self._vartree.settings
4326                 stale_cache = set(blocker_cache)
4327                 fake_vartree = self._get_fake_vartree(acquire_lock=acquire_lock)
4328                 dep_check_trees = self._dep_check_trees
4329                 vardb = fake_vartree.dbapi
4330                 installed_pkgs = list(vardb)
4331
4332                 for inst_pkg in installed_pkgs:
4333                         stale_cache.discard(inst_pkg.cpv)
4334                         cached_blockers = blocker_cache.get(inst_pkg.cpv)
4335                         if cached_blockers is not None and \
4336                                 cached_blockers.counter != long(inst_pkg.metadata["COUNTER"]):
4337                                 cached_blockers = None
4338                         if cached_blockers is not None:
4339                                 blocker_atoms = cached_blockers.atoms
4340                         else:
4341                                 # Use aux_get() to trigger FakeVartree global
4342                                 # updates on *DEPEND when appropriate.
4343                                 depstr = " ".join(vardb.aux_get(inst_pkg.cpv, dep_keys))
4344                                 try:
4345                                         portage.dep._dep_check_strict = False
4346                                         success, atoms = portage.dep_check(depstr,
4347                                                 vardb, settings, myuse=inst_pkg.use.enabled,
4348                                                 trees=dep_check_trees, myroot=inst_pkg.root)
4349                                 finally:
4350                                         portage.dep._dep_check_strict = True
4351                                 if not success:
4352                                         pkg_location = os.path.join(inst_pkg.root,
4353                                                 portage.VDB_PATH, inst_pkg.category, inst_pkg.pf)
4354                                         portage.writemsg("!!! %s/*DEPEND: %s\n" % \
4355                                                 (pkg_location, atoms), noiselevel=-1)
4356                                         continue
4357
4358                                 blocker_atoms = [atom for atom in atoms \
4359                                         if atom.startswith("!")]
4360                                 blocker_atoms.sort()
4361                                 counter = long(inst_pkg.metadata["COUNTER"])
4362                                 blocker_cache[inst_pkg.cpv] = \
4363                                         blocker_cache.BlockerData(counter, blocker_atoms)
4364                 for cpv in stale_cache:
4365                         del blocker_cache[cpv]
4366                 blocker_cache.flush()
4367
4368                 blocker_parents = digraph()
4369                 blocker_atoms = []
4370                 for pkg in installed_pkgs:
4371                         for blocker_atom in blocker_cache[pkg.cpv].atoms:
4372                                 blocker_atom = blocker_atom.lstrip("!")
4373                                 blocker_atoms.append(blocker_atom)
4374                                 blocker_parents.add(blocker_atom, pkg)
4375
4376                 blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
4377                 blocking_pkgs = set()
4378                 for atom in blocker_atoms.iterAtomsForPackage(new_pkg):
4379                         blocking_pkgs.update(blocker_parents.parent_nodes(atom))
4380
4381                 # Check for blockers in the other direction.
4382                 depstr = " ".join(new_pkg.metadata[k] for k in dep_keys)
4383                 try:
4384                         portage.dep._dep_check_strict = False
4385                         success, atoms = portage.dep_check(depstr,
4386                                 vardb, settings, myuse=new_pkg.use.enabled,
4387                                 trees=dep_check_trees, myroot=new_pkg.root)
4388                 finally:
4389                         portage.dep._dep_check_strict = True
4390                 if not success:
4391                         # We should never get this far with invalid deps.
4392                         show_invalid_depstring_notice(new_pkg, depstr, atoms)
4393                         assert False
4394
4395                 blocker_atoms = [atom.lstrip("!") for atom in atoms \
4396                         if atom[:1] == "!"]
4397                 if blocker_atoms:
4398                         blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
4399                         for inst_pkg in installed_pkgs:
4400                                 try:
4401                                         blocker_atoms.iterAtomsForPackage(inst_pkg).next()
4402                                 except (portage.exception.InvalidDependString, StopIteration):
4403                                         continue
4404                                 blocking_pkgs.add(inst_pkg)
4405
4406                 return blocking_pkgs
4407
4408 def show_invalid_depstring_notice(parent_node, depstring, error_msg):
4409
4410         msg1 = "\n\n!!! Invalid or corrupt dependency specification: " + \
4411                 "\n\n%s\n\n%s\n\n%s\n\n" % (error_msg, parent_node, depstring)
4412         p_type, p_root, p_key, p_status = parent_node
4413         msg = []
4414         if p_status == "nomerge":
4415                 category, pf = portage.catsplit(p_key)
4416                 pkg_location = os.path.join(p_root, portage.VDB_PATH, category, pf)
4417                 msg.append("Portage is unable to process the dependencies of the ")
4418                 msg.append("'%s' package. " % p_key)
4419                 msg.append("In order to correct this problem, the package ")
4420                 msg.append("should be uninstalled, reinstalled, or upgraded. ")
4421                 msg.append("As a temporary workaround, the --nodeps option can ")
4422                 msg.append("be used to ignore all dependencies.  For reference, ")
4423                 msg.append("the problematic dependencies can be found in the ")
4424                 msg.append("*DEPEND files located in '%s/'." % pkg_location)
4425         else:
4426                 msg.append("This package can not be installed. ")
4427                 msg.append("Please notify the '%s' package maintainer " % p_key)
4428                 msg.append("about this problem.")
4429
4430         msg2 = "".join("%s\n" % line for line in textwrap.wrap("".join(msg), 72))
4431         writemsg_level(msg1 + msg2, level=logging.ERROR, noiselevel=-1)
4432
4433 class PackageVirtualDbapi(portage.dbapi):
4434         """
4435         A dbapi-like interface class that represents the state of the installed
4436         package database as new packages are installed, replacing any packages
4437         that previously existed in the same slot. The main difference between
4438         this class and fakedbapi is that this one uses Package instances
4439         internally (passed in via cpv_inject() and cpv_remove() calls).
4440         """
4441         def __init__(self, settings):
4442                 portage.dbapi.__init__(self)
4443                 self.settings = settings
4444                 self._match_cache = {}
4445                 self._cp_map = {}
4446                 self._cpv_map = {}
4447
4448         def clear(self):
4449                 """
4450                 Remove all packages.
4451                 """
4452                 if self._cpv_map:
4453                         self._clear_cache()
4454                         self._cp_map.clear()
4455                         self._cpv_map.clear()
4456
4457         def copy(self):
4458                 obj = PackageVirtualDbapi(self.settings)
4459                 obj._match_cache = self._match_cache.copy()
4460                 obj._cp_map = self._cp_map.copy()
4461                 for k, v in obj._cp_map.iteritems():
4462                         obj._cp_map[k] = v[:]
4463                 obj._cpv_map = self._cpv_map.copy()
4464                 return obj
4465
4466         def __iter__(self):
4467                 return self._cpv_map.itervalues()
4468
4469         def __contains__(self, item):
4470                 existing = self._cpv_map.get(item.cpv)
4471                 if existing is not None and \
4472                         existing == item:
4473                         return True
4474                 return False
4475
4476         def get(self, item, default=None):
4477                 cpv = getattr(item, "cpv", None)
4478                 if cpv is None:
4479                         if len(item) != 4:
4480                                 return default
4481                         type_name, root, cpv, operation = item
4482
4483                 existing = self._cpv_map.get(cpv)
4484                 if existing is not None and \
4485                         existing == item:
4486                         return existing
4487                 return default
4488
4489         def match_pkgs(self, atom):
4490                 return [self._cpv_map[cpv] for cpv in self.match(atom)]
4491
4492         def _clear_cache(self):
4493                 if self._categories is not None:
4494                         self._categories = None
4495                 if self._match_cache:
4496                         self._match_cache = {}
4497
4498         def match(self, origdep, use_cache=1):
4499                 result = self._match_cache.get(origdep)
4500                 if result is not None:
4501                         return result[:]
4502                 result = portage.dbapi.match(self, origdep, use_cache=use_cache)
4503                 self._match_cache[origdep] = result
4504                 return result[:]
4505
4506         def cpv_exists(self, cpv):
4507                 return cpv in self._cpv_map
4508
4509         def cp_list(self, mycp, use_cache=1):
4510                 cachelist = self._match_cache.get(mycp)
4511                 # cp_list() doesn't expand old-style virtuals
4512                 if cachelist and cachelist[0].startswith(mycp):
4513                         return cachelist[:]
4514                 cpv_list = self._cp_map.get(mycp)
4515                 if cpv_list is None:
4516                         cpv_list = []
4517                 else:
4518                         cpv_list = [pkg.cpv for pkg in cpv_list]
4519                 self._cpv_sort_ascending(cpv_list)
4520                 if not (not cpv_list and mycp.startswith("virtual/")):
4521                         self._match_cache[mycp] = cpv_list
4522                 return cpv_list[:]
4523
4524         def cp_all(self):
4525                 return list(self._cp_map)
4526
4527         def cpv_all(self):
4528                 return list(self._cpv_map)
4529
4530         def cpv_inject(self, pkg):
4531                 cp_list = self._cp_map.get(pkg.cp)
4532                 if cp_list is None:
4533                         cp_list = []
4534                         self._cp_map[pkg.cp] = cp_list
4535                 e_pkg = self._cpv_map.get(pkg.cpv)
4536                 if e_pkg is not None:
4537                         if e_pkg == pkg:
4538                                 return
4539                         self.cpv_remove(e_pkg)
4540                 for e_pkg in cp_list:
4541                         if e_pkg.slot_atom == pkg.slot_atom:
4542                                 if e_pkg == pkg:
4543                                         return
4544                                 self.cpv_remove(e_pkg)
4545                                 break
4546                 cp_list.append(pkg)
4547                 self._cpv_map[pkg.cpv] = pkg
4548                 self._clear_cache()
4549
4550         def cpv_remove(self, pkg):
4551                 old_pkg = self._cpv_map.get(pkg.cpv)
4552                 if old_pkg != pkg:
4553                         raise KeyError(pkg)
4554                 self._cp_map[pkg.cp].remove(pkg)
4555                 del self._cpv_map[pkg.cpv]
4556                 self._clear_cache()
4557
4558         def aux_get(self, cpv, wants):
4559                 metadata = self._cpv_map[cpv].metadata
4560                 return [metadata.get(x, "") for x in wants]
4561
4562         def aux_update(self, cpv, values):
4563                 self._cpv_map[cpv].metadata.update(values)
4564                 self._clear_cache()
4565
4566 class depgraph(object):
4567
4568         pkg_tree_map = RootConfig.pkg_tree_map
4569
4570         _dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
4571
4572         def __init__(self, settings, trees, myopts, myparams, spinner):
4573                 self.settings = settings
4574                 self.target_root = settings["ROOT"]
4575                 self.myopts = myopts
4576                 self.myparams = myparams
4577                 self.edebug = 0
4578                 if settings.get("PORTAGE_DEBUG", "") == "1":
4579                         self.edebug = 1
4580                 self.spinner = spinner
4581                 self._running_root = trees["/"]["root_config"]
4582                 self._opts_no_restart = Scheduler._opts_no_restart
4583                 self.pkgsettings = {}
4584                 # Maps slot atom to package for each Package added to the graph.
4585                 self._slot_pkg_map = {}
4586                 # Maps nodes to the reasons they were selected for reinstallation.
4587                 self._reinstall_nodes = {}
4588                 self.mydbapi = {}
4589                 self.trees = {}
4590                 self._trees_orig = trees
4591                 self.roots = {}
4592                 # Contains a filtered view of preferred packages that are selected
4593                 # from available repositories.
4594                 self._filtered_trees = {}
4595                 # Contains installed packages and new packages that have been added
4596                 # to the graph.
4597                 self._graph_trees = {}
4598                 # All Package instances
4599                 self._pkg_cache = {}
4600                 for myroot in trees:
4601                         self.trees[myroot] = {}
4602                         # Create a RootConfig instance that references
4603                         # the FakeVartree instead of the real one.
4604                         self.roots[myroot] = RootConfig(
4605                                 trees[myroot]["vartree"].settings,
4606                                 self.trees[myroot],
4607                                 trees[myroot]["root_config"].setconfig)
4608                         for tree in ("porttree", "bintree"):
4609                                 self.trees[myroot][tree] = trees[myroot][tree]
4610                         self.trees[myroot]["vartree"] = \
4611                                 FakeVartree(trees[myroot]["root_config"],
4612                                         pkg_cache=self._pkg_cache)
4613                         self.pkgsettings[myroot] = portage.config(
4614                                 clone=self.trees[myroot]["vartree"].settings)
4615                         self._slot_pkg_map[myroot] = {}
4616                         vardb = self.trees[myroot]["vartree"].dbapi
4617                         preload_installed_pkgs = "--nodeps" not in self.myopts and \
4618                                 "--buildpkgonly" not in self.myopts
4619                         # This fakedbapi instance will model the state that the vdb will
4620                         # have after new packages have been installed.
4621                         fakedb = PackageVirtualDbapi(vardb.settings)
4622                         if preload_installed_pkgs:
4623                                 for pkg in vardb:
4624                                         self.spinner.update()
4625                                         # This triggers metadata updates via FakeVartree.
4626                                         vardb.aux_get(pkg.cpv, [])
4627                                         fakedb.cpv_inject(pkg)
4628
4629                         # Now that the vardb state is cached in our FakeVartree,
4630                         # we won't be needing the real vartree cache for awhile.
4631                         # To make some room on the heap, clear the vardbapi
4632                         # caches.
4633                         trees[myroot]["vartree"].dbapi._clear_cache()
4634                         gc.collect()
4635
4636                         self.mydbapi[myroot] = fakedb
4637                         def graph_tree():
4638                                 pass
4639                         graph_tree.dbapi = fakedb
4640                         self._graph_trees[myroot] = {}
4641                         self._filtered_trees[myroot] = {}
4642                         # Substitute the graph tree for the vartree in dep_check() since we
4643                         # want atom selections to be consistent with package selections
4644                         # have already been made.
4645                         self._graph_trees[myroot]["porttree"]   = graph_tree
4646                         self._graph_trees[myroot]["vartree"]    = graph_tree
4647                         def filtered_tree():
4648                                 pass
4649                         filtered_tree.dbapi = self._dep_check_composite_db(self, myroot)
4650                         self._filtered_trees[myroot]["porttree"] = filtered_tree
4651
4652                         # Passing in graph_tree as the vartree here could lead to better
4653                         # atom selections in some cases by causing atoms for packages that
4654                         # have been added to the graph to be preferred over other choices.
4655                         # However, it can trigger atom selections that result in
4656                         # unresolvable direct circular dependencies. For example, this
4657                         # happens with gwydion-dylan which depends on either itself or
4658                         # gwydion-dylan-bin. In case gwydion-dylan is not yet installed,
4659                         # gwydion-dylan-bin needs to be selected in order to avoid a
4660                         # an unresolvable direct circular dependency.
4661                         #
4662                         # To solve the problem described above, pass in "graph_db" so that
4663                         # packages that have been added to the graph are distinguishable
4664                         # from other available packages and installed packages. Also, pass
4665                         # the parent package into self._select_atoms() calls so that
4666                         # unresolvable direct circular dependencies can be detected and
4667                         # avoided when possible.
4668                         self._filtered_trees[myroot]["graph_db"] = graph_tree.dbapi
4669                         self._filtered_trees[myroot]["vartree"] = self.trees[myroot]["vartree"]
4670
4671                         dbs = []
4672                         portdb = self.trees[myroot]["porttree"].dbapi
4673                         bindb  = self.trees[myroot]["bintree"].dbapi
4674                         vardb  = self.trees[myroot]["vartree"].dbapi
4675                         #               (db, pkg_type, built, installed, db_keys)
4676                         if "--usepkgonly" not in self.myopts:
4677                                 db_keys = list(portdb._aux_cache_keys)
4678                                 dbs.append((portdb, "ebuild", False, False, db_keys))
4679                         if "--usepkg" in self.myopts:
4680                                 db_keys = list(bindb._aux_cache_keys)
4681                                 dbs.append((bindb,  "binary", True, False, db_keys))
4682                         db_keys = list(trees[myroot]["vartree"].dbapi._aux_cache_keys)
4683                         dbs.append((vardb, "installed", True, True, db_keys))
4684                         self._filtered_trees[myroot]["dbs"] = dbs
4685                         if "--usepkg" in self.myopts:
4686                                 self.trees[myroot]["bintree"].populate(
4687                                         "--getbinpkg" in self.myopts,
4688                                         "--getbinpkgonly" in self.myopts)
4689                 del trees
4690
4691                 self.digraph=portage.digraph()
4692                 # contains all sets added to the graph
4693                 self._sets = {}
4694                 # contains atoms given as arguments
4695                 self._sets["args"] = InternalPackageSet()
4696                 # contains all atoms from all sets added to the graph, including
4697                 # atoms given as arguments
4698                 self._set_atoms = InternalPackageSet()
4699                 self._atom_arg_map = {}
4700                 # contains all nodes pulled in by self._set_atoms
4701                 self._set_nodes = set()
4702                 # Contains only Blocker -> Uninstall edges
4703                 self._blocker_uninstalls = digraph()
4704                 # Contains only Package -> Blocker edges
4705                 self._blocker_parents = digraph()
4706                 # Contains only irrelevant Package -> Blocker edges
4707                 self._irrelevant_blockers = digraph()
4708                 # Contains only unsolvable Package -> Blocker edges
4709                 self._unsolvable_blockers = digraph()
4710                 # Contains all Blocker -> Blocked Package edges
4711                 self._blocked_pkgs = digraph()
4712                 # Contains world packages that have been protected from
4713                 # uninstallation but may not have been added to the graph
4714                 # if the graph is not complete yet.
4715                 self._blocked_world_pkgs = {}
4716                 self._slot_collision_info = {}
4717                 # Slot collision nodes are not allowed to block other packages since
4718                 # blocker validation is only able to account for one package per slot.
4719                 self._slot_collision_nodes = set()
4720                 self._parent_atoms = {}
4721                 self._slot_conflict_parent_atoms = set()
4722                 self._serialized_tasks_cache = None
4723                 self._scheduler_graph = None
4724                 self._displayed_list = None
4725                 self._pprovided_args = []
4726                 self._missing_args = []
4727                 self._masked_installed = set()
4728                 self._unsatisfied_deps_for_display = []
4729                 self._unsatisfied_blockers_for_display = None
4730                 self._circular_deps_for_display = None
4731                 self._dep_stack = []
4732                 self._unsatisfied_deps = []
4733                 self._initially_unsatisfied_deps = []
4734                 self._ignored_deps = []
4735                 self._required_set_names = set(["system", "world"])
4736                 self._select_atoms = self._select_atoms_highest_available
4737                 self._select_package = self._select_pkg_highest_available
4738                 self._highest_pkg_cache = {}
4739
4740         def _show_slot_collision_notice(self):
4741                 """Show an informational message advising the user to mask one of the
4742                 the packages. In some cases it may be possible to resolve this
4743                 automatically, but support for backtracking (removal nodes that have
4744                 already been selected) will be required in order to handle all possible
4745                 cases.
4746                 """
4747
4748                 if not self._slot_collision_info:
4749                         return
4750
4751                 self._show_merge_list()
4752
4753                 msg = []
4754                 msg.append("\n!!! Multiple package instances within a single " + \
4755                         "package slot have been pulled\n")
4756                 msg.append("!!! into the dependency graph, resulting" + \
4757                         " in a slot conflict:\n\n")
4758                 indent = "  "
4759                 # Max number of parents shown, to avoid flooding the display.
4760                 max_parents = 3
4761                 explanation_columns = 70
4762                 explanations = 0
4763                 for (slot_atom, root), slot_nodes \
4764                         in self._slot_collision_info.iteritems():
4765                         msg.append(str(slot_atom))
4766                         msg.append("\n\n")
4767
4768                         for node in slot_nodes:
4769                                 msg.append(indent)
4770                                 msg.append(str(node))
4771                                 parent_atoms = self._parent_atoms.get(node)
4772                                 if parent_atoms:
4773                                         pruned_list = set()
4774                                         # Prefer conflict atoms over others.
4775                                         for parent_atom in parent_atoms:
4776                                                 if len(pruned_list) >= max_parents:
4777                                                         break
4778                                                 if parent_atom in self._slot_conflict_parent_atoms:
4779                                                         pruned_list.add(parent_atom)
4780
4781                                         # If this package was pulled in by conflict atoms then
4782                                         # show those alone since those are the most interesting.
4783                                         if not pruned_list:
4784                                                 # When generating the pruned list, prefer instances
4785                                                 # of DependencyArg over instances of Package.
4786                                                 for parent_atom in parent_atoms:
4787                                                         if len(pruned_list) >= max_parents:
4788                                                                 break
4789                                                         parent, atom = parent_atom
4790                                                         if isinstance(parent, DependencyArg):
4791                                                                 pruned_list.add(parent_atom)
4792                                                 # Prefer Packages instances that themselves have been
4793                                                 # pulled into collision slots.
4794                                                 for parent_atom in parent_atoms:
4795                                                         if len(pruned_list) >= max_parents:
4796                                                                 break
4797                                                         parent, atom = parent_atom
4798                                                         if isinstance(parent, Package) and \
4799                                                                 (parent.slot_atom, parent.root) \
4800                                                                 in self._slot_collision_info:
4801                                                                 pruned_list.add(parent_atom)
4802                                                 for parent_atom in parent_atoms:
4803                                                         if len(pruned_list) >= max_parents:
4804                                                                 break
4805                                                         pruned_list.add(parent_atom)
4806                                         omitted_parents = len(parent_atoms) - len(pruned_list)
4807                                         parent_atoms = pruned_list
4808                                         msg.append(" pulled in by\n")
4809                                         for parent_atom in parent_atoms:
4810                                                 parent, atom = parent_atom
4811                                                 msg.append(2*indent)
4812                                                 if isinstance(parent,
4813                                                         (PackageArg, AtomArg)):
4814                                                         # For PackageArg and AtomArg types, it's
4815                                                         # redundant to display the atom attribute.
4816                                                         msg.append(str(parent))
4817                                                 else:
4818                                                         # Display the specific atom from SetArg or
4819                                                         # Package types.
4820                                                         msg.append("%s required by %s" % (atom, parent))
4821                                                 msg.append("\n")
4822                                         if omitted_parents:
4823                                                 msg.append(2*indent)
4824                                                 msg.append("(and %d more)\n" % omitted_parents)
4825                                 else:
4826                                         msg.append(" (no parents)\n")
4827                                 msg.append("\n")
4828                         explanation = self._slot_conflict_explanation(slot_nodes)
4829                         if explanation:
4830                                 explanations += 1
4831                                 msg.append(indent + "Explanation:\n\n")
4832                                 for line in textwrap.wrap(explanation, explanation_columns):
4833                                         msg.append(2*indent + line + "\n")
4834                                 msg.append("\n")
4835                 msg.append("\n")
4836                 sys.stderr.write("".join(msg))
4837                 sys.stderr.flush()
4838
4839                 explanations_for_all = explanations == len(self._slot_collision_info)
4840
4841                 if explanations_for_all or "--quiet" in self.myopts:
4842                         return
4843
4844                 msg = []
4845                 msg.append("It may be possible to solve this problem ")
4846                 msg.append("by using package.mask to prevent one of ")
4847                 msg.append("those packages from being selected. ")
4848                 msg.append("However, it is also possible that conflicting ")
4849                 msg.append("dependencies exist such that they are impossible to ")
4850                 msg.append("satisfy simultaneously.  If such a conflict exists in ")
4851                 msg.append("the dependencies of two different packages, then those ")
4852                 msg.append("packages can not be installed simultaneously.")
4853
4854                 from formatter import AbstractFormatter, DumbWriter
4855                 f = AbstractFormatter(DumbWriter(sys.stderr, maxcol=72))
4856                 for x in msg:
4857                         f.add_flowing_data(x)
4858                 f.end_paragraph(1)
4859
4860                 msg = []
4861                 msg.append("For more information, see MASKED PACKAGES ")
4862                 msg.append("section in the emerge man page or refer ")
4863                 msg.append("to the Gentoo Handbook.")
4864                 for x in msg:
4865                         f.add_flowing_data(x)
4866                 f.end_paragraph(1)
4867                 f.writer.flush()
4868
4869         def _slot_conflict_explanation(self, slot_nodes):
4870                 """
4871                 When a slot conflict occurs due to USE deps, there are a few
4872                 different cases to consider:
4873
4874                 1) New USE are correctly set but --newuse wasn't requested so an
4875                    installed package with incorrect USE happened to get pulled
4876                    into graph before the new one.
4877
4878                 2) New USE are incorrectly set but an installed package has correct
4879                    USE so it got pulled into the graph, and a new instance also got
4880                    pulled in due to --newuse or an upgrade.
4881
4882                 3) Multiple USE deps exist that can't be satisfied simultaneously,
4883                    and multiple package instances got pulled into the same slot to
4884                    satisfy the conflicting deps.
4885
4886                 Currently, explanations and suggested courses of action are generated
4887                 for cases 1 and 2. Case 3 is too complex to give a useful suggestion.
4888                 """
4889
4890                 if len(slot_nodes) != 2:
4891                         # Suggestions are only implemented for
4892                         # conflicts between two packages.
4893                         return None
4894
4895                 all_conflict_atoms = self._slot_conflict_parent_atoms
4896                 matched_node = None
4897                 matched_atoms = None
4898                 unmatched_node = None
4899                 for node in slot_nodes:
4900                         parent_atoms = self._parent_atoms.get(node)
4901                         if not parent_atoms:
4902                                 # Normally, there are always parent atoms. If there are
4903                                 # none then something unexpected is happening and there's
4904                                 # currently no suggestion for this case.
4905                                 return None
4906                         conflict_atoms = all_conflict_atoms.intersection(parent_atoms)
4907                         for parent_atom in conflict_atoms:
4908                                 parent, atom = parent_atom
4909                                 if not atom.use:
4910                                         # Suggestions are currently only implemented for cases
4911                                         # in which all conflict atoms have USE deps.
4912                                         return None
4913                         if conflict_atoms:
4914                                 if matched_node is not None:
4915                                         # If conflict atoms match multiple nodes
4916                                         # then there's no suggestion.
4917                                         return None
4918                                 matched_node = node
4919                                 matched_atoms = conflict_atoms
4920                         else:
4921                                 if unmatched_node is not None:
4922                                         # Neither node is matched by conflict atoms, and
4923                                         # there is no suggestion for this case.
4924                                         return None
4925                                 unmatched_node = node
4926
4927                 if matched_node is None or unmatched_node is None:
4928                         # This shouldn't happen.
4929                         return None
4930
4931                 if unmatched_node.installed and not matched_node.installed and \
4932                         unmatched_node.cpv == matched_node.cpv:
4933                         # If the conflicting packages are the same version then
4934                         # --newuse should be all that's needed. If they are different
4935                         # versions then there's some other problem.
4936                         return "New USE are correctly set, but --newuse wasn't" + \
4937                                 " requested, so an installed package with incorrect USE " + \
4938                                 "happened to get pulled into the dependency graph. " + \
4939                                 "In order to solve " + \
4940                                 "this, either specify the --newuse option or explicitly " + \
4941                                 " reinstall '%s'." % matched_node.slot_atom
4942
4943                 if matched_node.installed and not unmatched_node.installed:
4944                         atoms = sorted(set(atom for parent, atom in matched_atoms))
4945                         explanation = ("New USE for '%s' are incorrectly set. " + \
4946                                 "In order to solve this, adjust USE to satisfy '%s'") % \
4947                                 (matched_node.slot_atom, atoms[0])
4948                         if len(atoms) > 1:
4949                                 for atom in atoms[1:-1]:
4950                                         explanation += ", '%s'" % (atom,)
4951                                 if len(atoms) > 2:
4952                                         explanation += ","
4953                                 explanation += " and '%s'" % (atoms[-1],)
4954                         explanation += "."
4955                         return explanation
4956
4957                 return None
4958
4959         def _process_slot_conflicts(self):
4960                 """
4961                 Process slot conflict data to identify specific atoms which
4962                 lead to conflict. These atoms only match a subset of the
4963                 packages that have been pulled into a given slot.
4964                 """
4965                 for (slot_atom, root), slot_nodes \
4966                         in self._slot_collision_info.iteritems():
4967
4968                         all_parent_atoms = set()
4969                         for pkg in slot_nodes:
4970                                 parent_atoms = self._parent_atoms.get(pkg)
4971                                 if not parent_atoms:
4972                                         continue
4973                                 all_parent_atoms.update(parent_atoms)
4974
4975                         for pkg in slot_nodes:
4976                                 parent_atoms = self._parent_atoms.get(pkg)
4977                                 if parent_atoms is None:
4978                                         parent_atoms = set()
4979                                         self._parent_atoms[pkg] = parent_atoms
4980                                 for parent_atom in all_parent_atoms:
4981                                         if parent_atom in parent_atoms:
4982                                                 continue
4983                                         # Use package set for matching since it will match via
4984                                         # PROVIDE when necessary, while match_from_list does not.
4985                                         parent, atom = parent_atom
4986                                         atom_set = InternalPackageSet(
4987                                                 initial_atoms=(atom,))
4988                                         if atom_set.findAtomForPackage(pkg):
4989                                                 parent_atoms.add(parent_atom)
4990                                         else:
4991                                                 self._slot_conflict_parent_atoms.add(parent_atom)
4992
4993         def _reinstall_for_flags(self, forced_flags,
4994                 orig_use, orig_iuse, cur_use, cur_iuse):
4995                 """Return a set of flags that trigger reinstallation, or None if there
4996                 are no such flags."""
4997                 if "--newuse" in self.myopts:
4998                         flags = set(orig_iuse.symmetric_difference(
4999                                 cur_iuse).difference(forced_flags))
5000                         flags.update(orig_iuse.intersection(orig_use).symmetric_difference(
5001                                 cur_iuse.intersection(cur_use)))
5002                         if flags:
5003                                 return flags
5004                 elif "changed-use" == self.myopts.get("--reinstall"):
5005                         flags = orig_iuse.intersection(orig_use).symmetric_difference(
5006                                 cur_iuse.intersection(cur_use))
5007                         if flags:
5008                                 return flags
5009                 return None
5010
5011         def _create_graph(self, allow_unsatisfied=False):
5012                 dep_stack = self._dep_stack
5013                 while dep_stack:
5014                         self.spinner.update()
5015                         dep = dep_stack.pop()
5016                         if isinstance(dep, Package):
5017                                 if not self._add_pkg_deps(dep,
5018                                         allow_unsatisfied=allow_unsatisfied):
5019                                         return 0
5020                                 continue
5021                         if not self._add_dep(dep, allow_unsatisfied=allow_unsatisfied):
5022                                 return 0
5023                 return 1
5024
5025         def _add_dep(self, dep, allow_unsatisfied=False):
5026                 debug = "--debug" in self.myopts
5027                 buildpkgonly = "--buildpkgonly" in self.myopts
5028                 nodeps = "--nodeps" in self.myopts
5029                 empty = "empty" in self.myparams
5030                 deep = "deep" in self.myparams
5031                 update = "--update" in self.myopts and dep.depth <= 1
5032                 if dep.blocker:
5033                         if not buildpkgonly and \
5034                                 not nodeps and \
5035                                 dep.parent not in self._slot_collision_nodes:
5036                                 if dep.parent.onlydeps:
5037                                         # It's safe to ignore blockers if the
5038                                         # parent is an --onlydeps node.
5039                                         return 1
5040                                 # The blocker applies to the root where
5041                                 # the parent is or will be installed.
5042                                 blocker = Blocker(atom=dep.atom,
5043                                         eapi=dep.parent.metadata["EAPI"],
5044                                         root=dep.parent.root)
5045                                 self._blocker_parents.add(blocker, dep.parent)
5046                         return 1
5047                 dep_pkg, existing_node = self._select_package(dep.root, dep.atom,
5048                         onlydeps=dep.onlydeps)
5049                 if not dep_pkg:
5050                         if dep.priority.optional:
5051                                 # This could be an unecessary build-time dep
5052                                 # pulled in by --with-bdeps=y.
5053                                 return 1
5054                         if allow_unsatisfied:
5055                                 self._unsatisfied_deps.append(dep)
5056                                 return 1
5057                         self._unsatisfied_deps_for_display.append(
5058                                 ((dep.root, dep.atom), {"myparent":dep.parent}))
5059                         return 0
5060                 # In some cases, dep_check will return deps that shouldn't
5061                 # be proccessed any further, so they are identified and
5062                 # discarded here. Try to discard as few as possible since
5063                 # discarded dependencies reduce the amount of information
5064                 # available for optimization of merge order.
5065                 if dep.priority.satisfied and \
5066                         not dep_pkg.installed and \
5067                         not (existing_node or empty or deep or update):
5068                         myarg = None
5069                         if dep.root == self.target_root:
5070                                 try:
5071                                         myarg = self._iter_atoms_for_pkg(dep_pkg).next()
5072                                 except StopIteration:
5073                                         pass
5074                                 except portage.exception.InvalidDependString:
5075                                         if not dep_pkg.installed:
5076                                                 # This shouldn't happen since the package
5077                                                 # should have been masked.
5078                                                 raise
5079                         if not myarg:
5080                                 self._ignored_deps.append(dep)
5081                                 return 1
5082
5083                 if not self._add_pkg(dep_pkg, dep):
5084                         return 0
5085                 return 1
5086
5087         def _add_pkg(self, pkg, dep):
5088                 myparent = None
5089                 priority = None
5090                 depth = 0
5091                 if dep is None:
5092                         dep = Dependency()
5093                 else:
5094                         myparent = dep.parent
5095                         priority = dep.priority
5096                         depth = dep.depth
5097                 if priority is None:
5098                         priority = DepPriority()
5099                 """
5100                 Fills the digraph with nodes comprised of packages to merge.
5101                 mybigkey is the package spec of the package to merge.
5102                 myparent is the package depending on mybigkey ( or None )
5103                 addme = Should we add this package to the digraph or are we just looking at it's deps?
5104                         Think --onlydeps, we need to ignore packages in that case.
5105                 #stuff to add:
5106                 #SLOT-aware emerge
5107                 #IUSE-aware emerge -> USE DEP aware depgraph
5108                 #"no downgrade" emerge
5109                 """
5110                 # Ensure that the dependencies of the same package
5111                 # are never processed more than once.
5112                 previously_added = pkg in self.digraph
5113
5114                 # select the correct /var database that we'll be checking against
5115                 vardbapi = self.trees[pkg.root]["vartree"].dbapi
5116                 pkgsettings = self.pkgsettings[pkg.root]
5117
5118                 arg_atoms = None
5119                 if True:
5120                         try:
5121                                 arg_atoms = list(self._iter_atoms_for_pkg(pkg))
5122                         except portage.exception.InvalidDependString, e:
5123                                 if not pkg.installed:
5124                                         show_invalid_depstring_notice(
5125                                                 pkg, pkg.metadata["PROVIDE"], str(e))
5126                                         return 0
5127                                 del e
5128
5129                 if not pkg.onlydeps:
5130                         if not pkg.installed and \
5131                                 "empty" not in self.myparams and \
5132                                 vardbapi.match(pkg.slot_atom):
5133                                 # Increase the priority of dependencies on packages that
5134                                 # are being rebuilt. This optimizes merge order so that
5135                                 # dependencies are rebuilt/updated as soon as possible,
5136                                 # which is needed especially when emerge is called by
5137                                 # revdep-rebuild since dependencies may be affected by ABI
5138                                 # breakage that has rendered them useless. Don't adjust
5139                                 # priority here when in "empty" mode since all packages
5140                                 # are being merged in that case.
5141                                 priority.rebuild = True
5142
5143                         existing_node = self._slot_pkg_map[pkg.root].get(pkg.slot_atom)
5144                         slot_collision = False
5145                         if existing_node:
5146                                 existing_node_matches = pkg.cpv == existing_node.cpv
5147                                 if existing_node_matches and \
5148                                         pkg != existing_node and \
5149                                         dep.atom is not None:
5150                                         # Use package set for matching since it will match via
5151                                         # PROVIDE when necessary, while match_from_list does not.
5152                                         atom_set = InternalPackageSet(initial_atoms=[dep.atom])
5153                                         if not atom_set.findAtomForPackage(existing_node):
5154                                                 existing_node_matches = False
5155                                 if existing_node_matches:
5156                                         # The existing node can be reused.
5157                                         if arg_atoms:
5158                                                 for parent_atom in arg_atoms:
5159                                                         parent, atom = parent_atom
5160                                                         self.digraph.add(existing_node, parent,
5161                                                                 priority=priority)
5162                                                         self._add_parent_atom(existing_node, parent_atom)
5163                                         # If a direct circular dependency is not an unsatisfied
5164                                         # buildtime dependency then drop it here since otherwise
5165                                         # it can skew the merge order calculation in an unwanted
5166                                         # way.
5167                                         if existing_node != myparent or \
5168                                                 (priority.buildtime and not priority.satisfied):
5169                                                 self.digraph.addnode(existing_node, myparent,
5170                                                         priority=priority)
5171                                                 if dep.atom is not None and dep.parent is not None:
5172                                                         self._add_parent_atom(existing_node,
5173                                                                 (dep.parent, dep.atom))
5174                                         return 1
5175                                 else:
5176
5177                                         # A slot collision has occurred.  Sometimes this coincides
5178                                         # with unresolvable blockers, so the slot collision will be
5179                                         # shown later if there are no unresolvable blockers.
5180                                         self._add_slot_conflict(pkg)
5181                                         slot_collision = True
5182
5183                         if slot_collision:
5184                                 # Now add this node to the graph so that self.display()
5185                                 # can show use flags and --tree portage.output.  This node is
5186                                 # only being partially added to the graph.  It must not be
5187                                 # allowed to interfere with the other nodes that have been
5188                                 # added.  Do not overwrite data for existing nodes in
5189                                 # self.mydbapi since that data will be used for blocker
5190                                 # validation.
5191                                 # Even though the graph is now invalid, continue to process
5192                                 # dependencies so that things like --fetchonly can still
5193                                 # function despite collisions.
5194                                 pass
5195                         elif not previously_added:
5196                                 self._slot_pkg_map[pkg.root][pkg.slot_atom] = pkg
5197                                 self.mydbapi[pkg.root].cpv_inject(pkg)
5198                                 self._filtered_trees[pkg.root]["porttree"].dbapi._clear_cache()
5199
5200                         if not pkg.installed:
5201                                 # Allow this package to satisfy old-style virtuals in case it
5202                                 # doesn't already. Any pre-existing providers will be preferred
5203                                 # over this one.
5204                                 try:
5205                                         pkgsettings.setinst(pkg.cpv, pkg.metadata)
5206                                         # For consistency, also update the global virtuals.
5207                                         settings = self.roots[pkg.root].settings
5208                                         settings.unlock()
5209                                         settings.setinst(pkg.cpv, pkg.metadata)
5210                                         settings.lock()
5211                                 except portage.exception.InvalidDependString, e:
5212                                         show_invalid_depstring_notice(
5213                                                 pkg, pkg.metadata["PROVIDE"], str(e))
5214                                         del e
5215                                         return 0
5216
5217                 if arg_atoms:
5218                         self._set_nodes.add(pkg)
5219
5220                 # Do this even when addme is False (--onlydeps) so that the
5221                 # parent/child relationship is always known in case
5222                 # self._show_slot_collision_notice() needs to be called later.
5223                 self.digraph.add(pkg, myparent, priority=priority)
5224                 if dep.atom is not None and dep.parent is not None:
5225                         self._add_parent_atom(pkg, (dep.parent, dep.atom))
5226
5227                 if arg_atoms:
5228                         for parent_atom in arg_atoms:
5229                                 parent, atom = parent_atom
5230                                 self.digraph.add(pkg, parent, priority=priority)
5231                                 self._add_parent_atom(pkg, parent_atom)
5232
5233                 """ This section determines whether we go deeper into dependencies or not.
5234                     We want to go deeper on a few occasions:
5235                     Installing package A, we need to make sure package A's deps are met.
5236                     emerge --deep <pkgspec>; we need to recursively check dependencies of pkgspec
5237                     If we are in --nodeps (no recursion) mode, we obviously only check 1 level of dependencies.
5238                 """
5239                 dep_stack = self._dep_stack
5240                 if "recurse" not in self.myparams:
5241                         return 1
5242                 elif pkg.installed and \
5243                         "deep" not in self.myparams:
5244                         dep_stack = self._ignored_deps
5245
5246                 self.spinner.update()
5247
5248                 if arg_atoms:
5249                         depth = 0
5250                 pkg.depth = depth
5251                 if not previously_added:
5252                         dep_stack.append(pkg)
5253                 return 1
5254
5255         def _add_parent_atom(self, pkg, parent_atom):
5256                 parent_atoms = self._parent_atoms.get(pkg)
5257                 if parent_atoms is None:
5258                         parent_atoms = set()
5259                         self._parent_atoms[pkg] = parent_atoms
5260                 parent_atoms.add(parent_atom)
5261
5262         def _add_slot_conflict(self, pkg):
5263                 self._slot_collision_nodes.add(pkg)
5264                 slot_key = (pkg.slot_atom, pkg.root)
5265                 slot_nodes = self._slot_collision_info.get(slot_key)
5266                 if slot_nodes is None:
5267                         slot_nodes = set()
5268                         slot_nodes.add(self._slot_pkg_map[pkg.root][pkg.slot_atom])
5269                         self._slot_collision_info[slot_key] = slot_nodes
5270                 slot_nodes.add(pkg)
5271
5272         def _add_pkg_deps(self, pkg, allow_unsatisfied=False):
5273
5274                 mytype = pkg.type_name
5275                 myroot = pkg.root
5276                 mykey = pkg.cpv
5277                 metadata = pkg.metadata
5278                 myuse = pkg.use.enabled
5279                 jbigkey = pkg
5280                 depth = pkg.depth + 1
5281                 removal_action = "remove" in self.myparams
5282
5283                 edepend={}
5284                 depkeys = ["DEPEND","RDEPEND","PDEPEND"]
5285                 for k in depkeys:
5286                         edepend[k] = metadata[k]
5287
5288                 if not pkg.built and \
5289                         "--buildpkgonly" in self.myopts and \
5290                         "deep" not in self.myparams and \
5291                         "empty" not in self.myparams:
5292                         edepend["RDEPEND"] = ""
5293                         edepend["PDEPEND"] = ""
5294                 bdeps_optional = False
5295
5296                 if pkg.built and not removal_action:
5297                         if self.myopts.get("--with-bdeps", "n") == "y":
5298                                 # Pull in build time deps as requested, but marked them as
5299                                 # "optional" since they are not strictly required. This allows
5300                                 # more freedom in the merge order calculation for solving
5301                                 # circular dependencies. Don't convert to PDEPEND since that
5302                                 # could make --with-bdeps=y less effective if it is used to
5303                                 # adjust merge order to prevent built_with_use() calls from
5304                                 # failing.
5305                                 bdeps_optional = True
5306                         else:
5307                                 # built packages do not have build time dependencies.
5308                                 edepend["DEPEND"] = ""
5309
5310                 if removal_action and self.myopts.get("--with-bdeps", "y") == "n":
5311                         edepend["DEPEND"] = ""
5312
5313                 bdeps_root = "/"
5314                 root_deps = self.myopts.get("--root-deps")
5315                 if root_deps is not None:
5316                         if root_deps is True:
5317                                 bdeps_root = myroot
5318                         elif root_deps == "rdeps":
5319                                 edepend["DEPEND"] = ""
5320
5321                 deps = (
5322                         (bdeps_root, edepend["DEPEND"],
5323                                 self._priority(buildtime=(not bdeps_optional),
5324                                 optional=bdeps_optional)),
5325                         (myroot, edepend["RDEPEND"], self._priority(runtime=True)),
5326                         (myroot, edepend["PDEPEND"], self._priority(runtime_post=True))
5327                 )
5328
5329                 debug = "--debug" in self.myopts
5330                 strict = mytype != "installed"
5331                 try:
5332                         for dep_root, dep_string, dep_priority in deps:
5333                                 if not dep_string:
5334                                         continue
5335                                 if debug:
5336                                         print
5337                                         print "Parent:   ", jbigkey
5338                                         print "Depstring:", dep_string
5339                                         print "Priority:", dep_priority
5340                                 vardb = self.roots[dep_root].trees["vartree"].dbapi
5341                                 try:
5342                                         selected_atoms = self._select_atoms(dep_root,
5343                                                 dep_string, myuse=myuse, parent=pkg, strict=strict,
5344                                                 priority=dep_priority)
5345                                 except portage.exception.InvalidDependString, e:
5346                                         show_invalid_depstring_notice(jbigkey, dep_string, str(e))
5347                                         return 0
5348                                 if debug:
5349                                         print "Candidates:", selected_atoms
5350
5351                                 for atom in selected_atoms:
5352                                         try:
5353
5354                                                 atom = portage.dep.Atom(atom)
5355
5356                                                 mypriority = dep_priority.copy()
5357                                                 if not atom.blocker and vardb.match(atom):
5358                                                         mypriority.satisfied = True
5359
5360                                                 if not self._add_dep(Dependency(atom=atom,
5361                                                         blocker=atom.blocker, depth=depth, parent=pkg,
5362                                                         priority=mypriority, root=dep_root),
5363                                                         allow_unsatisfied=allow_unsatisfied):
5364                                                         return 0
5365
5366                                         except portage.exception.InvalidAtom, e:
5367                                                 show_invalid_depstring_notice(
5368                                                         pkg, dep_string, str(e))
5369                                                 del e
5370                                                 if not pkg.installed:
5371                                                         return 0
5372
5373                                 if debug:
5374                                         print "Exiting...", jbigkey
5375                 except portage.exception.AmbiguousPackageName, e:
5376                         pkgs = e.args[0]
5377                         portage.writemsg("\n\n!!! An atom in the dependencies " + \
5378                                 "is not fully-qualified. Multiple matches:\n\n", noiselevel=-1)
5379                         for cpv in pkgs:
5380                                 portage.writemsg("    %s\n" % cpv, noiselevel=-1)
5381                         portage.writemsg("\n", noiselevel=-1)
5382                         if mytype == "binary":
5383                                 portage.writemsg(
5384                                         "!!! This binary package cannot be installed: '%s'\n" % \
5385                                         mykey, noiselevel=-1)
5386                         elif mytype == "ebuild":
5387                                 portdb = self.roots[myroot].trees["porttree"].dbapi
5388                                 myebuild, mylocation = portdb.findname2(mykey)
5389                                 portage.writemsg("!!! This ebuild cannot be installed: " + \
5390                                         "'%s'\n" % myebuild, noiselevel=-1)
5391                         portage.writemsg("!!! Please notify the package maintainer " + \
5392                                 "that atoms must be fully-qualified.\n", noiselevel=-1)
5393                         return 0
5394                 return 1
5395
5396         def _priority(self, **kwargs):
5397                 if "remove" in self.myparams:
5398                         priority_constructor = UnmergeDepPriority
5399                 else:
5400                         priority_constructor = DepPriority
5401                 return priority_constructor(**kwargs)
5402
5403         def _dep_expand(self, root_config, atom_without_category):
5404                 """
5405                 @param root_config: a root config instance
5406                 @type root_config: RootConfig
5407                 @param atom_without_category: an atom without a category component
5408                 @type atom_without_category: String
5409                 @rtype: list
5410                 @returns: a list of atoms containing categories (possibly empty)
5411                 """
5412                 null_cp = portage.dep_getkey(insert_category_into_atom(
5413                         atom_without_category, "null"))
5414                 cat, atom_pn = portage.catsplit(null_cp)
5415
5416                 dbs = self._filtered_trees[root_config.root]["dbs"]
5417                 categories = set()
5418                 for db, pkg_type, built, installed, db_keys in dbs:
5419                         for cat in db.categories:
5420                                 if db.cp_list("%s/%s" % (cat, atom_pn)):
5421                                         categories.add(cat)
5422
5423                 deps = []
5424                 for cat in categories:
5425                         deps.append(insert_category_into_atom(
5426                                 atom_without_category, cat))
5427                 return deps
5428
5429         def _have_new_virt(self, root, atom_cp):
5430                 ret = False
5431                 for db, pkg_type, built, installed, db_keys in \
5432                         self._filtered_trees[root]["dbs"]:
5433                         if db.cp_list(atom_cp):
5434                                 ret = True
5435                                 break
5436                 return ret
5437
5438         def _iter_atoms_for_pkg(self, pkg):
5439                 # TODO: add multiple $ROOT support
5440                 if pkg.root != self.target_root:
5441                         return
5442                 atom_arg_map = self._atom_arg_map
5443                 root_config = self.roots[pkg.root]
5444                 for atom in self._set_atoms.iterAtomsForPackage(pkg):
5445                         atom_cp = portage.dep_getkey(atom)
5446                         if atom_cp != pkg.cp and \
5447                                 self._have_new_virt(pkg.root, atom_cp):
5448                                 continue
5449                         visible_pkgs = root_config.visible_pkgs.match_pkgs(atom)
5450                         visible_pkgs.reverse() # descending order
5451                         higher_slot = None
5452                         for visible_pkg in visible_pkgs:
5453                                 if visible_pkg.cp != atom_cp:
5454                                         continue
5455                                 if pkg >= visible_pkg:
5456                                         # This is descending order, and we're not
5457                                         # interested in any versions <= pkg given.
5458                                         break
5459                                 if pkg.slot_atom != visible_pkg.slot_atom:
5460                                         higher_slot = visible_pkg
5461                                         break
5462                         if higher_slot is not None:
5463                                 continue
5464                         for arg in atom_arg_map[(atom, pkg.root)]:
5465                                 if isinstance(arg, PackageArg) and \
5466                                         arg.package != pkg:
5467                                         continue
5468                                 yield arg, atom
5469
5470         def select_files(self, myfiles):
5471                 """Given a list of .tbz2s, .ebuilds sets, and deps, create the
5472                 appropriate depgraph and return a favorite list."""
5473                 debug = "--debug" in self.myopts
5474                 root_config = self.roots[self.target_root]
5475                 sets = root_config.sets
5476                 getSetAtoms = root_config.setconfig.getSetAtoms
5477                 myfavorites=[]
5478                 myroot = self.target_root
5479                 dbs = self._filtered_trees[myroot]["dbs"]
5480                 vardb = self.trees[myroot]["vartree"].dbapi
5481                 real_vardb = self._trees_orig[myroot]["vartree"].dbapi
5482                 portdb = self.trees[myroot]["porttree"].dbapi
5483                 bindb = self.trees[myroot]["bintree"].dbapi
5484                 pkgsettings = self.pkgsettings[myroot]
5485                 args = []
5486                 onlydeps = "--onlydeps" in self.myopts
5487                 lookup_owners = []
5488                 for x in myfiles:
5489                         ext = os.path.splitext(x)[1]
5490                         if ext==".tbz2":
5491                                 if not os.path.exists(x):
5492                                         if os.path.exists(
5493                                                 os.path.join(pkgsettings["PKGDIR"], "All", x)):
5494                                                 x = os.path.join(pkgsettings["PKGDIR"], "All", x)
5495                                         elif os.path.exists(
5496                                                 os.path.join(pkgsettings["PKGDIR"], x)):
5497                                                 x = os.path.join(pkgsettings["PKGDIR"], x)
5498                                         else:
5499                                                 print "\n\n!!! Binary package '"+str(x)+"' does not exist."
5500                                                 print "!!! Please ensure the tbz2 exists as specified.\n"
5501                                                 return 0, myfavorites
5502                                 mytbz2=portage.xpak.tbz2(x)
5503                                 mykey=mytbz2.getelements("CATEGORY")[0]+"/"+os.path.splitext(os.path.basename(x))[0]
5504                                 if os.path.realpath(x) != \
5505                                         os.path.realpath(self.trees[myroot]["bintree"].getname(mykey)):
5506                                         print colorize("BAD", "\n*** You need to adjust PKGDIR to emerge this package.\n")
5507                                         return 0, myfavorites
5508                                 db_keys = list(bindb._aux_cache_keys)
5509                                 metadata = izip(db_keys, bindb.aux_get(mykey, db_keys))
5510                                 pkg = Package(type_name="binary", root_config=root_config,
5511                                         cpv=mykey, built=True, metadata=metadata,
5512                                         onlydeps=onlydeps)
5513                                 self._pkg_cache[pkg] = pkg
5514                                 args.append(PackageArg(arg=x, package=pkg,
5515                                         root_config=root_config))
5516                         elif ext==".ebuild":
5517                                 ebuild_path = portage.util.normalize_path(os.path.abspath(x))
5518                                 pkgdir = os.path.dirname(ebuild_path)
5519                                 tree_root = os.path.dirname(os.path.dirname(pkgdir))
5520                                 cp = pkgdir[len(tree_root)+1:]
5521                                 e = portage.exception.PackageNotFound(
5522                                         ("%s is not in a valid portage tree " + \
5523                                         "hierarchy or does not exist") % x)
5524                                 if not portage.isvalidatom(cp):
5525                                         raise e
5526                                 cat = portage.catsplit(cp)[0]
5527                                 mykey = cat + "/" + os.path.basename(ebuild_path[:-7])
5528                                 if not portage.isvalidatom("="+mykey):
5529                                         raise e
5530                                 ebuild_path = portdb.findname(mykey)
5531                                 if ebuild_path:
5532                                         if ebuild_path != os.path.join(os.path.realpath(tree_root),
5533                                                 cp, os.path.basename(ebuild_path)):
5534                                                 print colorize("BAD", "\n*** You need to adjust PORTDIR or PORTDIR_OVERLAY to emerge this package.\n")
5535                                                 return 0, myfavorites
5536                                         if mykey not in portdb.xmatch(
5537                                                 "match-visible", portage.dep_getkey(mykey)):
5538                                                 print colorize("BAD", "\n*** You are emerging a masked package. It is MUCH better to use")
5539                                                 print colorize("BAD", "*** /etc/portage/package.* to accomplish this. See portage(5) man")
5540                                                 print colorize("BAD", "*** page for details.")
5541                                                 countdown(int(self.settings["EMERGE_WARNING_DELAY"]),
5542                                                         "Continuing...")
5543                                 else:
5544                                         raise portage.exception.PackageNotFound(
5545                                                 "%s is not in a valid portage tree hierarchy or does not exist" % x)
5546                                 db_keys = list(portdb._aux_cache_keys)
5547                                 metadata = izip(db_keys, portdb.aux_get(mykey, db_keys))
5548                                 pkg = Package(type_name="ebuild", root_config=root_config,
5549                                         cpv=mykey, metadata=metadata, onlydeps=onlydeps)
5550                                 pkgsettings.setcpv(pkg)
5551                                 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
5552                                 pkg.metadata['CHOST'] = pkgsettings.get('CHOST', '')
5553                                 self._pkg_cache[pkg] = pkg
5554                                 args.append(PackageArg(arg=x, package=pkg,
5555                                         root_config=root_config))
5556                         elif x.startswith(os.path.sep):
5557                                 if not x.startswith(myroot):
5558                                         portage.writemsg(("\n\n!!! '%s' does not start with" + \
5559                                                 " $ROOT.\n") % x, noiselevel=-1)
5560                                         return 0, []
5561                                 # Queue these up since it's most efficient to handle
5562                                 # multiple files in a single iter_owners() call.
5563                                 lookup_owners.append(x)
5564                         else:
5565                                 if x in ("system", "world"):
5566                                         x = SETPREFIX + x
5567                                 if x.startswith(SETPREFIX):
5568                                         s = x[len(SETPREFIX):]
5569                                         if s not in sets:
5570                                                 raise portage.exception.PackageSetNotFound(s)
5571                                         if s in self._sets:
5572                                                 continue
5573                                         # Recursively expand sets so that containment tests in
5574                                         # self._get_parent_sets() properly match atoms in nested
5575                                         # sets (like if world contains system).
5576                                         expanded_set = InternalPackageSet(
5577                                                 initial_atoms=getSetAtoms(s))
5578                                         self._sets[s] = expanded_set
5579                                         args.append(SetArg(arg=x, set=expanded_set,
5580                                                 root_config=root_config))
5581                                         continue
5582                                 if not is_valid_package_atom(x):
5583                                         portage.writemsg("\n\n!!! '%s' is not a valid package atom.\n" % x,
5584                                                 noiselevel=-1)
5585                                         portage.writemsg("!!! Please check ebuild(5) for full details.\n")
5586                                         portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
5587                                         return (0,[])
5588                                 # Don't expand categories or old-style virtuals here unless
5589                                 # necessary. Expansion of old-style virtuals here causes at
5590                                 # least the following problems:
5591                                 #   1) It's more difficult to determine which set(s) an atom
5592                                 #      came from, if any.
5593                                 #   2) It takes away freedom from the resolver to choose other
5594                                 #      possible expansions when necessary.
5595                                 if "/" in x:
5596                                         args.append(AtomArg(arg=x, atom=x,
5597                                                 root_config=root_config))
5598                                         continue
5599                                 expanded_atoms = self._dep_expand(root_config, x)
5600                                 installed_cp_set = set()
5601                                 for atom in expanded_atoms:
5602                                         atom_cp = portage.dep_getkey(atom)
5603                                         if vardb.cp_list(atom_cp):
5604                                                 installed_cp_set.add(atom_cp)
5605                                 if len(expanded_atoms) > 1 and len(installed_cp_set) == 1:
5606                                         installed_cp = iter(installed_cp_set).next()
5607                                         expanded_atoms = [atom for atom in expanded_atoms \
5608                                                 if portage.dep_getkey(atom) == installed_cp]
5609
5610                                 if len(expanded_atoms) > 1:
5611                                         print
5612                                         print
5613                                         ambiguous_package_name(x, expanded_atoms, root_config,
5614                                                 self.spinner, self.myopts)
5615                                         return False, myfavorites
5616                                 if expanded_atoms:
5617                                         atom = expanded_atoms[0]
5618                                 else:
5619                                         null_atom = insert_category_into_atom(x, "null")
5620                                         null_cp = portage.dep_getkey(null_atom)
5621                                         cat, atom_pn = portage.catsplit(null_cp)
5622                                         virts_p = root_config.settings.get_virts_p().get(atom_pn)
5623                                         if virts_p:
5624                                                 # Allow the depgraph to choose which virtual.
5625                                                 atom = insert_category_into_atom(x, "virtual")
5626                                         else:
5627                                                 atom = insert_category_into_atom(x, "null")
5628
5629                                 args.append(AtomArg(arg=x, atom=atom,
5630                                         root_config=root_config))
5631
5632                 if lookup_owners:
5633                         relative_paths = []
5634                         search_for_multiple = False
5635                         if len(lookup_owners) > 1:
5636                                 search_for_multiple = True
5637
5638                         for x in lookup_owners:
5639                                 if not search_for_multiple and os.path.isdir(x):
5640                                         search_for_multiple = True
5641                                 relative_paths.append(x[len(myroot):])
5642
5643                         owners = set()
5644                         for pkg, relative_path in \
5645                                 real_vardb._owners.iter_owners(relative_paths):
5646                                 owners.add(pkg.mycpv)
5647                                 if not search_for_multiple:
5648                                         break
5649
5650                         if not owners:
5651                                 portage.writemsg(("\n\n!!! '%s' is not claimed " + \
5652                                         "by any package.\n") % lookup_owners[0], noiselevel=-1)
5653                                 return 0, []
5654
5655                         for cpv in owners:
5656                                 slot = vardb.aux_get(cpv, ["SLOT"])[0]
5657                                 if not slot:
5658                                         # portage now masks packages with missing slot, but it's
5659                                         # possible that one was installed by an older version
5660                                         atom = portage.cpv_getkey(cpv)
5661                                 else:
5662                                         atom = "%s:%s" % (portage.cpv_getkey(cpv), slot)
5663                                 args.append(AtomArg(arg=atom, atom=atom,
5664                                         root_config=root_config))
5665
5666                 if "--update" in self.myopts:
5667                         # In some cases, the greedy slots behavior can pull in a slot that
5668                         # the user would want to uninstall due to it being blocked by a
5669                         # newer version in a different slot. Therefore, it's necessary to
5670                         # detect and discard any that should be uninstalled. Each time
5671                         # that arguments are updated, package selections are repeated in
5672                         # order to ensure consistency with the current arguments:
5673                         #
5674                         #  1) Initialize args
5675                         #  2) Select packages and generate initial greedy atoms
5676                         #  3) Update args with greedy atoms
5677                         #  4) Select packages and generate greedy atoms again, while
5678                         #     accounting for any blockers between selected packages
5679                         #  5) Update args with revised greedy atoms
5680
5681                         self._set_args(args)
5682                         greedy_args = []
5683                         for arg in args:
5684                                 greedy_args.append(arg)
5685                                 if not isinstance(arg, AtomArg):
5686                                         continue
5687                                 for atom in self._greedy_slots(arg.root_config, arg.atom):
5688                                         greedy_args.append(
5689                                                 AtomArg(arg=arg.arg, atom=atom,
5690                                                         root_config=arg.root_config))
5691
5692                         self._set_args(greedy_args)
5693                         del greedy_args
5694
5695                         # Revise greedy atoms, accounting for any blockers
5696                         # between selected packages.
5697                         revised_greedy_args = []
5698                         for arg in args:
5699                                 revised_greedy_args.append(arg)
5700                                 if not isinstance(arg, AtomArg):
5701                                         continue
5702                                 for atom in self._greedy_slots(arg.root_config, arg.atom,
5703                                         blocker_lookahead=True):
5704                                         revised_greedy_args.append(
5705                                                 AtomArg(arg=arg.arg, atom=atom,
5706                                                         root_config=arg.root_config))
5707                         args = revised_greedy_args
5708                         del revised_greedy_args
5709
5710                 self._set_args(args)
5711
5712                 myfavorites = set(myfavorites)
5713                 for arg in args:
5714                         if isinstance(arg, (AtomArg, PackageArg)):
5715                                 myfavorites.add(arg.atom)
5716                         elif isinstance(arg, SetArg):
5717                                 myfavorites.add(arg.arg)
5718                 myfavorites = list(myfavorites)
5719
5720                 pprovideddict = pkgsettings.pprovideddict
5721                 if debug:
5722                         portage.writemsg("\n", noiselevel=-1)
5723                 # Order needs to be preserved since a feature of --nodeps
5724                 # is to allow the user to force a specific merge order.
5725                 args.reverse()
5726                 while args:
5727                         arg = args.pop()
5728                         for atom in arg.set:
5729                                 self.spinner.update()
5730                                 dep = Dependency(atom=atom, onlydeps=onlydeps,
5731                                         root=myroot, parent=arg)
5732                                 atom_cp = portage.dep_getkey(atom)
5733                                 try:
5734                                         pprovided = pprovideddict.get(portage.dep_getkey(atom))
5735                                         if pprovided and portage.match_from_list(atom, pprovided):
5736                                                 # A provided package has been specified on the command line.
5737                                                 self._pprovided_args.append((arg, atom))
5738                                                 continue
5739                                         if isinstance(arg, PackageArg):
5740                                                 if not self._add_pkg(arg.package, dep) or \
5741                                                         not self._create_graph():
5742                                                         sys.stderr.write(("\n\n!!! Problem resolving " + \
5743                                                                 "dependencies for %s\n") % arg.arg)
5744                                                         return 0, myfavorites
5745                                                 continue
5746                                         if debug:
5747                                                 portage.writemsg("      Arg: %s\n     Atom: %s\n" % \
5748                                                         (arg, atom), noiselevel=-1)
5749                                         pkg, existing_node = self._select_package(
5750                                                 myroot, atom, onlydeps=onlydeps)
5751                                         if not pkg:
5752                                                 if not (isinstance(arg, SetArg) and \
5753                                                         arg.name in ("system", "world")):
5754                                                         self._unsatisfied_deps_for_display.append(
5755                                                                 ((myroot, atom), {}))
5756                                                         return 0, myfavorites
5757                                                 self._missing_args.append((arg, atom))
5758                                                 continue
5759                                         if atom_cp != pkg.cp:
5760                                                 # For old-style virtuals, we need to repeat the
5761                                                 # package.provided check against the selected package.
5762                                                 expanded_atom = atom.replace(atom_cp, pkg.cp)
5763                                                 pprovided = pprovideddict.get(pkg.cp)
5764                                                 if pprovided and \
5765                                                         portage.match_from_list(expanded_atom, pprovided):
5766                                                         # A provided package has been
5767                                                         # specified on the command line.
5768                                                         self._pprovided_args.append((arg, atom))
5769                                                         continue
5770                                         if pkg.installed and "selective" not in self.myparams:
5771                                                 self._unsatisfied_deps_for_display.append(
5772                                                         ((myroot, atom), {}))
5773                                                 # Previous behavior was to bail out in this case, but
5774                                                 # since the dep is satisfied by the installed package,
5775                                                 # it's more friendly to continue building the graph
5776                                                 # and just show a warning message. Therefore, only bail
5777                                                 # out here if the atom is not from either the system or
5778                                                 # world set.
5779                                                 if not (isinstance(arg, SetArg) and \
5780                                                         arg.name in ("system", "world")):
5781                                                         return 0, myfavorites
5782
5783                                         # Add the selected package to the graph as soon as possible
5784                                         # so that later dep_check() calls can use it as feedback
5785                                         # for making more consistent atom selections.
5786                                         if not self._add_pkg(pkg, dep):
5787                                                 if isinstance(arg, SetArg):
5788                                                         sys.stderr.write(("\n\n!!! Problem resolving " + \
5789                                                                 "dependencies for %s from %s\n") % \
5790                                                                 (atom, arg.arg))
5791                                                 else:
5792                                                         sys.stderr.write(("\n\n!!! Problem resolving " + \
5793                                                                 "dependencies for %s\n") % atom)
5794                                                 return 0, myfavorites
5795
5796                                 except portage.exception.MissingSignature, e:
5797                                         portage.writemsg("\n\n!!! A missing gpg signature is preventing portage from calculating the\n")
5798                                         portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
5799                                         portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
5800                                         portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
5801                                         portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
5802                                         return 0, myfavorites
5803                                 except portage.exception.InvalidSignature, e:
5804                                         portage.writemsg("\n\n!!! An invalid gpg signature is preventing portage from calculating the\n")
5805                                         portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
5806                                         portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
5807                                         portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
5808                                         portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
5809                                         return 0, myfavorites
5810                                 except SystemExit, e:
5811                                         raise # Needed else can't exit
5812                                 except Exception, e:
5813                                         print >> sys.stderr, "\n\n!!! Problem in '%s' dependencies." % atom
5814                                         print >> sys.stderr, "!!!", str(e), getattr(e, "__module__", None)
5815                                         raise
5816
5817                 # Now that the root packages have been added to the graph,
5818                 # process the dependencies.
5819                 if not self._create_graph():
5820                         return 0, myfavorites
5821
5822                 missing=0
5823                 if "--usepkgonly" in self.myopts:
5824                         for xs in self.digraph.all_nodes():
5825                                 if not isinstance(xs, Package):
5826                                         continue
5827                                 if len(xs) >= 4 and xs[0] != "binary" and xs[3] == "merge":
5828                                         if missing == 0:
5829                                                 print
5830                                         missing += 1
5831                                         print "Missing binary for:",xs[2]
5832
5833                 try:
5834                         self.altlist()
5835                 except self._unknown_internal_error:
5836                         return False, myfavorites
5837
5838                 # We're true here unless we are missing binaries.
5839                 return (not missing,myfavorites)
5840
5841         def _set_args(self, args):
5842                 """
5843                 Create the "args" package set from atoms and packages given as
5844                 arguments. This method can be called multiple times if necessary.
5845                 The package selection cache is automatically invalidated, since
5846                 arguments influence package selections.
5847                 """
5848                 args_set = self._sets["args"]
5849                 args_set.clear()
5850                 for arg in args:
5851                         if not isinstance(arg, (AtomArg, PackageArg)):
5852                                 continue
5853                         atom = arg.atom
5854                         if atom in args_set:
5855                                 continue
5856                         args_set.add(atom)
5857
5858                 self._set_atoms.clear()
5859                 self._set_atoms.update(chain(*self._sets.itervalues()))
5860                 atom_arg_map = self._atom_arg_map
5861                 atom_arg_map.clear()
5862                 for arg in args:
5863                         for atom in arg.set:
5864                                 atom_key = (atom, arg.root_config.root)
5865                                 refs = atom_arg_map.get(atom_key)
5866                                 if refs is None:
5867                                         refs = []
5868                                         atom_arg_map[atom_key] = refs
5869                                         if arg not in refs:
5870                                                 refs.append(arg)
5871
5872                 # Invalidate the package selection cache, since
5873                 # arguments influence package selections.
5874                 self._highest_pkg_cache.clear()
5875                 for trees in self._filtered_trees.itervalues():
5876                         trees["porttree"].dbapi._clear_cache()
5877
5878         def _greedy_slots(self, root_config, atom, blocker_lookahead=False):
5879                 """
5880                 Return a list of slot atoms corresponding to installed slots that
5881                 differ from the slot of the highest visible match. When
5882                 blocker_lookahead is True, slot atoms that would trigger a blocker
5883                 conflict are automatically discarded, potentially allowing automatic
5884                 uninstallation of older slots when appropriate.
5885                 """
5886                 highest_pkg, in_graph = self._select_package(root_config.root, atom)
5887                 if highest_pkg is None:
5888                         return []
5889                 vardb = root_config.trees["vartree"].dbapi
5890                 slots = set()
5891                 for cpv in vardb.match(atom):
5892                         # don't mix new virtuals with old virtuals
5893                         if portage.cpv_getkey(cpv) == highest_pkg.cp:
5894                                 slots.add(vardb.aux_get(cpv, ["SLOT"])[0])
5895
5896                 slots.add(highest_pkg.metadata["SLOT"])
5897                 if len(slots) == 1:
5898                         return []
5899                 greedy_pkgs = []
5900                 slots.remove(highest_pkg.metadata["SLOT"])
5901                 while slots:
5902                         slot = slots.pop()
5903                         slot_atom = portage.dep.Atom("%s:%s" % (highest_pkg.cp, slot))
5904                         pkg, in_graph = self._select_package(root_config.root, slot_atom)
5905                         if pkg is not None and \
5906                                 pkg.cp == highest_pkg.cp and pkg < highest_pkg:
5907                                 greedy_pkgs.append(pkg)
5908                 if not greedy_pkgs:
5909                         return []
5910                 if not blocker_lookahead:
5911                         return [pkg.slot_atom for pkg in greedy_pkgs]
5912
5913                 blockers = {}
5914                 blocker_dep_keys = ["DEPEND", "PDEPEND", "RDEPEND"]
5915                 for pkg in greedy_pkgs + [highest_pkg]:
5916                         dep_str = " ".join(pkg.metadata[k] for k in blocker_dep_keys)
5917                         try:
5918                                 atoms = self._select_atoms(
5919                                         pkg.root, dep_str, pkg.use.enabled,
5920                                         parent=pkg, strict=True)
5921                         except portage.exception.InvalidDependString:
5922                                 continue
5923                         blocker_atoms = (x for x in atoms if x.blocker)
5924                         blockers[pkg] = InternalPackageSet(initial_atoms=blocker_atoms)
5925
5926                 if highest_pkg not in blockers:
5927                         return []
5928
5929                 # filter packages with invalid deps
5930                 greedy_pkgs = [pkg for pkg in greedy_pkgs if pkg in blockers]
5931
5932                 # filter packages that conflict with highest_pkg
5933                 greedy_pkgs = [pkg for pkg in greedy_pkgs if not \
5934                         (blockers[highest_pkg].findAtomForPackage(pkg) or \
5935                         blockers[pkg].findAtomForPackage(highest_pkg))]
5936
5937                 if not greedy_pkgs:
5938                         return []
5939
5940                 # If two packages conflict, discard the lower version.
5941                 discard_pkgs = set()
5942                 greedy_pkgs.sort(reverse=True)
5943                 for i in xrange(len(greedy_pkgs) - 1):
5944                         pkg1 = greedy_pkgs[i]
5945                         if pkg1 in discard_pkgs:
5946                                 continue
5947                         for j in xrange(i + 1, len(greedy_pkgs)):
5948                                 pkg2 = greedy_pkgs[j]
5949                                 if pkg2 in discard_pkgs:
5950                                         continue
5951                                 if blockers[pkg1].findAtomForPackage(pkg2) or \
5952                                         blockers[pkg2].findAtomForPackage(pkg1):
5953                                         # pkg1 > pkg2
5954                                         discard_pkgs.add(pkg2)
5955
5956                 return [pkg.slot_atom for pkg in greedy_pkgs \
5957                         if pkg not in discard_pkgs]
5958
5959         def _select_atoms_from_graph(self, *pargs, **kwargs):
5960                 """
5961                 Prefer atoms matching packages that have already been
5962                 added to the graph or those that are installed and have
5963                 not been scheduled for replacement.
5964                 """
5965                 kwargs["trees"] = self._graph_trees
5966                 return self._select_atoms_highest_available(*pargs, **kwargs)
5967
5968         def _select_atoms_highest_available(self, root, depstring,
5969                 myuse=None, parent=None, strict=True, trees=None, priority=None):
5970                 """This will raise InvalidDependString if necessary. If trees is
5971                 None then self._filtered_trees is used."""
5972                 pkgsettings = self.pkgsettings[root]
5973                 if trees is None:
5974                         trees = self._filtered_trees
5975                 if not getattr(priority, "buildtime", False):
5976                         # The parent should only be passed to dep_check() for buildtime
5977                         # dependencies since that's the only case when it's appropriate
5978                         # to trigger the circular dependency avoidance code which uses it.
5979                         # It's important not to trigger the same circular dependency
5980                         # avoidance code for runtime dependencies since it's not needed
5981                         # and it can promote an incorrect package choice.
5982                         parent = None
5983                 if True:
5984                         try:
5985                                 if parent is not None:
5986                                         trees[root]["parent"] = parent
5987                                 if not strict:
5988                                         portage.dep._dep_check_strict = False
5989                                 mycheck = portage.dep_check(depstring, None,
5990                                         pkgsettings, myuse=myuse,
5991                                         myroot=root, trees=trees)
5992                         finally:
5993                                 if parent is not None:
5994                                         trees[root].pop("parent")
5995                                 portage.dep._dep_check_strict = True
5996                         if not mycheck[0]:
5997                                 raise portage.exception.InvalidDependString(mycheck[1])
5998                         selected_atoms = mycheck[1]
5999                 return selected_atoms
6000
6001         def _show_unsatisfied_dep(self, root, atom, myparent=None, arg=None):
6002                 atom = portage.dep.Atom(atom)
6003                 atom_set = InternalPackageSet(initial_atoms=(atom,))
6004                 atom_without_use = atom
6005                 if atom.use:
6006                         atom_without_use = portage.dep.remove_slot(atom)
6007                         if atom.slot:
6008                                 atom_without_use += ":" + atom.slot
6009                         atom_without_use = portage.dep.Atom(atom_without_use)
6010                 xinfo = '"%s"' % atom
6011                 if arg:
6012                         xinfo='"%s"' % arg
6013                 # Discard null/ from failed cpv_expand category expansion.
6014                 xinfo = xinfo.replace("null/", "")
6015                 masked_packages = []
6016                 missing_use = []
6017                 masked_pkg_instances = set()
6018                 missing_licenses = []
6019                 have_eapi_mask = False
6020                 pkgsettings = self.pkgsettings[root]
6021                 implicit_iuse = pkgsettings._get_implicit_iuse()
6022                 root_config = self.roots[root]
6023                 portdb = self.roots[root].trees["porttree"].dbapi
6024                 dbs = self._filtered_trees[root]["dbs"]
6025                 for db, pkg_type, built, installed, db_keys in dbs:
6026                         if installed:
6027                                 continue
6028                         match = db.match
6029                         if hasattr(db, "xmatch"):
6030                                 cpv_list = db.xmatch("match-all", atom_without_use)
6031                         else:
6032                                 cpv_list = db.match(atom_without_use)
6033                         # descending order
6034                         cpv_list.reverse()
6035                         for cpv in cpv_list:
6036                                 metadata, mreasons  = get_mask_info(root_config, cpv,
6037                                         pkgsettings, db, pkg_type, built, installed, db_keys)
6038                                 if metadata is not None:
6039                                         pkg = Package(built=built, cpv=cpv,
6040                                                 installed=installed, metadata=metadata,
6041                                                 root_config=root_config)
6042                                         if pkg.cp != atom.cp:
6043                                                 # A cpv can be returned from dbapi.match() as an
6044                                                 # old-style virtual match even in cases when the
6045                                                 # package does not actually PROVIDE the virtual.
6046                                                 # Filter out any such false matches here.
6047                                                 if not atom_set.findAtomForPackage(pkg):
6048                                                         continue
6049                                         if mreasons:
6050                                                 masked_pkg_instances.add(pkg)
6051                                         if atom.use:
6052                                                 missing_use.append(pkg)
6053                                                 if not mreasons:
6054                                                         continue
6055                                 masked_packages.append(
6056                                         (root_config, pkgsettings, cpv, metadata, mreasons))
6057
6058                 missing_use_reasons = []
6059                 missing_iuse_reasons = []
6060                 for pkg in missing_use:
6061                         use = pkg.use.enabled
6062                         iuse = implicit_iuse.union(re.escape(x) for x in pkg.iuse.all)
6063                         iuse_re = re.compile("^(%s)$" % "|".join(iuse))
6064                         missing_iuse = []
6065                         for x in atom.use.required:
6066                                 if iuse_re.match(x) is None:
6067                                         missing_iuse.append(x)
6068                         mreasons = []
6069                         if missing_iuse:
6070                                 mreasons.append("Missing IUSE: %s" % " ".join(missing_iuse))
6071                                 missing_iuse_reasons.append((pkg, mreasons))
6072                         else:
6073                                 need_enable = sorted(atom.use.enabled.difference(use))
6074                                 need_disable = sorted(atom.use.disabled.intersection(use))
6075                                 if need_enable or need_disable:
6076                                         changes = []
6077                                         changes.extend(colorize("red", "+" + x) \
6078                                                 for x in need_enable)
6079                                         changes.extend(colorize("blue", "-" + x) \
6080                                                 for x in need_disable)
6081                                         mreasons.append("Change USE: %s" % " ".join(changes))
6082                                         missing_use_reasons.append((pkg, mreasons))
6083
6084                 unmasked_use_reasons = [(pkg, mreasons) for (pkg, mreasons) \
6085                         in missing_use_reasons if pkg not in masked_pkg_instances]
6086
6087                 unmasked_iuse_reasons = [(pkg, mreasons) for (pkg, mreasons) \
6088                         in missing_iuse_reasons if pkg not in masked_pkg_instances]
6089
6090                 show_missing_use = False
6091                 if unmasked_use_reasons:
6092                         # Only show the latest version.
6093                         show_missing_use = unmasked_use_reasons[:1]
6094                 elif unmasked_iuse_reasons:
6095                         if missing_use_reasons:
6096                                 # All packages with required IUSE are masked,
6097                                 # so display a normal masking message.
6098                                 pass
6099                         else:
6100                                 show_missing_use = unmasked_iuse_reasons
6101
6102                 if show_missing_use:
6103                         print "\nemerge: there are no ebuilds built with USE flags to satisfy "+green(xinfo)+"."
6104                         print "!!! One of the following packages is required to complete your request:"
6105                         for pkg, mreasons in show_missing_use:
6106                                 print "- "+pkg.cpv+" ("+", ".join(mreasons)+")"
6107
6108                 elif masked_packages:
6109                         print "\n!!! " + \
6110                                 colorize("BAD", "All ebuilds that could satisfy ") + \
6111                                 colorize("INFORM", xinfo) + \
6112                                 colorize("BAD", " have been masked.")
6113                         print "!!! One of the following masked packages is required to complete your request:"
6114                         have_eapi_mask = show_masked_packages(masked_packages)
6115                         if have_eapi_mask:
6116                                 print
6117                                 msg = ("The current version of portage supports " + \
6118                                         "EAPI '%s'. You must upgrade to a newer version" + \
6119                                         " of portage before EAPI masked packages can" + \
6120                                         " be installed.") % portage.const.EAPI
6121                                 from textwrap import wrap
6122                                 for line in wrap(msg, 75):
6123                                         print line
6124                         print
6125                         show_mask_docs()
6126                 else:
6127                         print "\nemerge: there are no ebuilds to satisfy "+green(xinfo)+"."
6128
6129                 # Show parent nodes and the argument that pulled them in.
6130                 traversed_nodes = set()
6131                 node = myparent
6132                 msg = []
6133                 while node is not None:
6134                         traversed_nodes.add(node)
6135                         msg.append('(dependency required by "%s" [%s])' % \
6136                                 (colorize('INFORM', str(node.cpv)), node.type_name))
6137                         # When traversing to parents, prefer arguments over packages
6138                         # since arguments are root nodes. Never traverse the same
6139                         # package twice, in order to prevent an infinite loop.
6140                         selected_parent = None
6141                         for parent in self.digraph.parent_nodes(node):
6142                                 if isinstance(parent, DependencyArg):
6143                                         msg.append('(dependency required by "%s" [argument])' % \
6144                                                 (colorize('INFORM', str(parent))))
6145                                         selected_parent = None
6146                                         break
6147                                 if parent not in traversed_nodes:
6148                                         selected_parent = parent
6149                         node = selected_parent
6150                 for line in msg:
6151                         print line
6152
6153                 print
6154
6155         def _select_pkg_highest_available(self, root, atom, onlydeps=False):
6156                 cache_key = (root, atom, onlydeps)
6157                 ret = self._highest_pkg_cache.get(cache_key)
6158                 if ret is not None:
6159                         pkg, existing = ret
6160                         if pkg and not existing:
6161                                 existing = self._slot_pkg_map[root].get(pkg.slot_atom)
6162                                 if existing and existing == pkg:
6163                                         # Update the cache to reflect that the
6164                                         # package has been added to the graph.
6165                                         ret = pkg, pkg
6166                                         self._highest_pkg_cache[cache_key] = ret
6167                         return ret
6168                 ret = self._select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)
6169                 self._highest_pkg_cache[cache_key] = ret
6170                 pkg, existing = ret
6171                 if pkg is not None:
6172                         settings = pkg.root_config.settings
6173                         if visible(settings, pkg) and not (pkg.installed and \
6174                                 settings._getMissingKeywords(pkg.cpv, pkg.metadata)):
6175                                 pkg.root_config.visible_pkgs.cpv_inject(pkg)
6176                 return ret
6177
6178         def _select_pkg_highest_available_imp(self, root, atom, onlydeps=False):
6179                 root_config = self.roots[root]
6180                 pkgsettings = self.pkgsettings[root]
6181                 dbs = self._filtered_trees[root]["dbs"]
6182                 vardb = self.roots[root].trees["vartree"].dbapi
6183                 portdb = self.roots[root].trees["porttree"].dbapi
6184                 # List of acceptable packages, ordered by type preference.
6185                 matched_packages = []
6186                 highest_version = None
6187                 if not isinstance(atom, portage.dep.Atom):
6188                         atom = portage.dep.Atom(atom)
6189                 atom_cp = atom.cp
6190                 atom_set = InternalPackageSet(initial_atoms=(atom,))
6191                 existing_node = None
6192                 myeb = None
6193                 usepkgonly = "--usepkgonly" in self.myopts
6194                 empty = "empty" in self.myparams
6195                 selective = "selective" in self.myparams
6196                 reinstall = False
6197                 noreplace = "--noreplace" in self.myopts
6198                 # Behavior of the "selective" parameter depends on
6199                 # whether or not a package matches an argument atom.
6200                 # If an installed package provides an old-style
6201                 # virtual that is no longer provided by an available
6202                 # package, the installed package may match an argument
6203                 # atom even though none of the available packages do.
6204                 # Therefore, "selective" logic does not consider
6205                 # whether or not an installed package matches an
6206                 # argument atom. It only considers whether or not
6207                 # available packages match argument atoms, which is
6208                 # represented by the found_available_arg flag.
6209                 found_available_arg = False
6210                 for find_existing_node in True, False:
6211                         if existing_node:
6212                                 break
6213                         for db, pkg_type, built, installed, db_keys in dbs:
6214                                 if existing_node:
6215                                         break
6216                                 if installed and not find_existing_node:
6217                                         want_reinstall = reinstall or empty or \
6218                                                 (found_available_arg and not selective)
6219                                         if want_reinstall and matched_packages:
6220                                                 continue
6221                                 if hasattr(db, "xmatch"):
6222                                         cpv_list = db.xmatch("match-all", atom)
6223                                 else:
6224                                         cpv_list = db.match(atom)
6225
6226                                 # USE=multislot can make an installed package appear as if
6227                                 # it doesn't satisfy a slot dependency. Rebuilding the ebuild
6228                                 # won't do any good as long as USE=multislot is enabled since
6229                                 # the newly built package still won't have the expected slot.
6230                                 # Therefore, assume that such SLOT dependencies are already
6231                                 # satisfied rather than forcing a rebuild.
6232                                 if installed and not cpv_list and atom.slot:
6233                                         for cpv in db.match(atom.cp):
6234                                                 slot_available = False
6235                                                 for other_db, other_type, other_built, \
6236                                                         other_installed, other_keys in dbs:
6237                                                         try:
6238                                                                 if atom.slot == \
6239                                                                         other_db.aux_get(cpv, ["SLOT"])[0]:
6240                                                                         slot_available = True
6241                                                                         break
6242                                                         except KeyError:
6243                                                                 pass
6244                                                 if not slot_available:
6245                                                         continue
6246                                                 inst_pkg = self._pkg(cpv, "installed",
6247                                                         root_config, installed=installed)
6248                                                 # Remove the slot from the atom and verify that
6249                                                 # the package matches the resulting atom.
6250                                                 atom_without_slot = portage.dep.remove_slot(atom)
6251                                                 if atom.use:
6252                                                         atom_without_slot += str(atom.use)
6253                                                 atom_without_slot = portage.dep.Atom(atom_without_slot)
6254                                                 if portage.match_from_list(
6255                                                         atom_without_slot, [inst_pkg]):
6256                                                         cpv_list = [inst_pkg.cpv]
6257                                                 break
6258
6259                                 if not cpv_list:
6260                                         continue
6261                                 pkg_status = "merge"
6262                                 if installed or onlydeps:
6263                                         pkg_status = "nomerge"
6264                                 # descending order
6265                                 cpv_list.reverse()
6266                                 for cpv in cpv_list:
6267                                         # Make --noreplace take precedence over --newuse.
6268                                         if not installed and noreplace and \
6269                                                 cpv in vardb.match(atom):
6270                                                 # If the installed version is masked, it may
6271                                                 # be necessary to look at lower versions,
6272                                                 # in case there is a visible downgrade.
6273                                                 continue
6274                                         reinstall_for_flags = None
6275                                         cache_key = (pkg_type, root, cpv, pkg_status)
6276                                         calculated_use = True
6277                                         pkg = self._pkg_cache.get(cache_key)
6278                                         if pkg is None:
6279                                                 calculated_use = False
6280                                                 try:
6281                                                         metadata = izip(db_keys, db.aux_get(cpv, db_keys))
6282                                                 except KeyError:
6283                                                         continue
6284                                                 pkg = Package(built=built, cpv=cpv,
6285                                                         installed=installed, metadata=metadata,
6286                                                         onlydeps=onlydeps, root_config=root_config,
6287                                                         type_name=pkg_type)
6288                                                 metadata = pkg.metadata
6289                                                 if not built:
6290                                                         metadata['CHOST'] = pkgsettings.get('CHOST', '')
6291                                                 if not built and ("?" in metadata["LICENSE"] or \
6292                                                         "?" in metadata["PROVIDE"]):
6293                                                         # This is avoided whenever possible because
6294                                                         # it's expensive. It only needs to be done here
6295                                                         # if it has an effect on visibility.
6296                                                         pkgsettings.setcpv(pkg)
6297                                                         metadata["USE"] = pkgsettings["PORTAGE_USE"]
6298                                                         calculated_use = True
6299                                                 self._pkg_cache[pkg] = pkg
6300
6301                                         if not installed or (built and matched_packages):
6302                                                 # Only enforce visibility on installed packages
6303                                                 # if there is at least one other visible package
6304                                                 # available. By filtering installed masked packages
6305                                                 # here, packages that have been masked since they
6306                                                 # were installed can be automatically downgraded
6307                                                 # to an unmasked version.
6308                                                 try:
6309                                                         if not visible(pkgsettings, pkg):
6310                                                                 continue
6311                                                 except portage.exception.InvalidDependString:
6312                                                         if not installed:
6313                                                                 continue
6314
6315                                                 # Enable upgrade or downgrade to a version
6316                                                 # with visible KEYWORDS when the installed
6317                                                 # version is masked by KEYWORDS, but never
6318                                                 # reinstall the same exact version only due
6319                                                 # to a KEYWORDS mask.
6320                                                 if built and matched_packages:
6321
6322                                                         different_version = None
6323                                                         for avail_pkg in matched_packages:
6324                                                                 if not portage.dep.cpvequal(
6325                                                                         pkg.cpv, avail_pkg.cpv):
6326                                                                         different_version = avail_pkg
6327                                                                         break
6328                                                         if different_version is not None:
6329
6330                                                                 if installed and \
6331                                                                         pkgsettings._getMissingKeywords(
6332                                                                         pkg.cpv, pkg.metadata):
6333                                                                         continue
6334
6335                                                                 # If the ebuild no longer exists or it's
6336                                                                 # keywords have been dropped, reject built
6337                                                                 # instances (installed or binary).
6338                                                                 # If --usepkgonly is enabled, assume that
6339                                                                 # the ebuild status should be ignored.
6340                                                                 if not usepkgonly:
6341                                                                         try:
6342                                                                                 pkg_eb = self._pkg(
6343                                                                                         pkg.cpv, "ebuild", root_config)
6344                                                                         except portage.exception.PackageNotFound:
6345                                                                                 continue
6346                                                                         else:
6347                                                                                 if not visible(pkgsettings, pkg_eb):
6348                                                                                         continue
6349
6350                                         if not pkg.built and not calculated_use:
6351                                                 # This is avoided whenever possible because
6352                                                 # it's expensive.
6353                                                 pkgsettings.setcpv(pkg)
6354                                                 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
6355
6356                                         if pkg.cp != atom.cp:
6357                                                 # A cpv can be returned from dbapi.match() as an
6358                                                 # old-style virtual match even in cases when the
6359                                                 # package does not actually PROVIDE the virtual.
6360                                                 # Filter out any such false matches here.
6361                                                 if not atom_set.findAtomForPackage(pkg):
6362                                                         continue
6363
6364                                         myarg = None
6365                                         if root == self.target_root:
6366                                                 try:
6367                                                         # Ebuild USE must have been calculated prior
6368                                                         # to this point, in case atoms have USE deps.
6369                                                         myarg = self._iter_atoms_for_pkg(pkg).next()
6370                                                 except StopIteration:
6371                                                         pass
6372                                                 except portage.exception.InvalidDependString:
6373                                                         if not installed:
6374                                                                 # masked by corruption
6375                                                                 continue
6376                                         if not installed and myarg:
6377                                                 found_available_arg = True
6378
6379                                         if atom.use and not pkg.built:
6380                                                 use = pkg.use.enabled
6381                                                 if atom.use.enabled.difference(use):
6382                                                         continue
6383                                                 if atom.use.disabled.intersection(use):
6384                                                         continue
6385                                         if pkg.cp == atom_cp:
6386                                                 if highest_version is None:
6387                                                         highest_version = pkg
6388                                                 elif pkg > highest_version:
6389                                                         highest_version = pkg
6390                                         # At this point, we've found the highest visible
6391                                         # match from the current repo. Any lower versions
6392                                         # from this repo are ignored, so this so the loop
6393                                         # will always end with a break statement below
6394                                         # this point.
6395                                         if find_existing_node:
6396                                                 e_pkg = self._slot_pkg_map[root].get(pkg.slot_atom)
6397                                                 if not e_pkg:
6398                                                         break
6399                                                 if portage.dep.match_from_list(atom, [e_pkg]):
6400                                                         if highest_version and \
6401                                                                 e_pkg.cp == atom_cp and \
6402                                                                 e_pkg < highest_version and \
6403                                                                 e_pkg.slot_atom != highest_version.slot_atom:
6404                                                                 # There is a higher version available in a
6405                                                                 # different slot, so this existing node is
6406                                                                 # irrelevant.
6407                                                                 pass
6408                                                         else:
6409                                                                 matched_packages.append(e_pkg)
6410                                                                 existing_node = e_pkg
6411                                                 break
6412                                         # Compare built package to current config and
6413                                         # reject the built package if necessary.
6414                                         if built and not installed and \
6415                                                 ("--newuse" in self.myopts or \
6416                                                 "--reinstall" in self.myopts):
6417                                                 iuses = pkg.iuse.all
6418                                                 old_use = pkg.use.enabled
6419                                                 if myeb:
6420                                                         pkgsettings.setcpv(myeb)
6421                                                 else:
6422                                                         pkgsettings.setcpv(pkg)
6423                                                 now_use = pkgsettings["PORTAGE_USE"].split()
6424                                                 forced_flags = set()
6425                                                 forced_flags.update(pkgsettings.useforce)
6426                                                 forced_flags.update(pkgsettings.usemask)
6427                                                 cur_iuse = iuses
6428                                                 if myeb and not usepkgonly:
6429                                                         cur_iuse = myeb.iuse.all
6430                                                 if self._reinstall_for_flags(forced_flags,
6431                                                         old_use, iuses,
6432                                                         now_use, cur_iuse):
6433                                                         break
6434                                         # Compare current config to installed package
6435                                         # and do not reinstall if possible.
6436                                         if not installed and \
6437                                                 ("--newuse" in self.myopts or \
6438                                                 "--reinstall" in self.myopts) and \
6439                                                 cpv in vardb.match(atom):
6440                                                 pkgsettings.setcpv(pkg)
6441                                                 forced_flags = set()
6442                                                 forced_flags.update(pkgsettings.useforce)
6443                                                 forced_flags.update(pkgsettings.usemask)
6444                                                 old_use = vardb.aux_get(cpv, ["USE"])[0].split()
6445                                                 old_iuse = set(filter_iuse_defaults(
6446                                                         vardb.aux_get(cpv, ["IUSE"])[0].split()))
6447                                                 cur_use = pkg.use.enabled
6448                                                 cur_iuse = pkg.iuse.all
6449                                                 reinstall_for_flags = \
6450                                                         self._reinstall_for_flags(
6451                                                         forced_flags, old_use, old_iuse,
6452                                                         cur_use, cur_iuse)
6453                                                 if reinstall_for_flags:
6454                                                         reinstall = True
6455                                         if not built:
6456                                                 myeb = pkg
6457                                         matched_packages.append(pkg)
6458                                         if reinstall_for_flags:
6459                                                 self._reinstall_nodes[pkg] = \
6460                                                         reinstall_for_flags
6461                                         break
6462
6463                 if not matched_packages:
6464                         return None, None
6465
6466                 if "--debug" in self.myopts:
6467                         for pkg in matched_packages:
6468                                 portage.writemsg("%s %s\n" % \
6469                                         ((pkg.type_name + ":").rjust(10), pkg.cpv), noiselevel=-1)
6470
6471                 # Filter out any old-style virtual matches if they are
6472                 # mixed with new-style virtual matches.
6473                 cp = portage.dep_getkey(atom)
6474                 if len(matched_packages) > 1 and \
6475                         "virtual" == portage.catsplit(cp)[0]:
6476                         for pkg in matched_packages:
6477                                 if pkg.cp != cp:
6478                                         continue
6479                                 # Got a new-style virtual, so filter
6480                                 # out any old-style virtuals.
6481                                 matched_packages = [pkg for pkg in matched_packages \
6482                                         if pkg.cp == cp]
6483                                 break
6484
6485                 if len(matched_packages) > 1:
6486                         bestmatch = portage.best(
6487                                 [pkg.cpv for pkg in matched_packages])
6488                         matched_packages = [pkg for pkg in matched_packages \
6489                                 if portage.dep.cpvequal(pkg.cpv, bestmatch)]
6490
6491                 # ordered by type preference ("ebuild" type is the last resort)
6492                 return  matched_packages[-1], existing_node
6493
6494         def _select_pkg_from_graph(self, root, atom, onlydeps=False):
6495                 """
6496                 Select packages that have already been added to the graph or
6497                 those that are installed and have not been scheduled for
6498                 replacement.
6499                 """
6500                 graph_db = self._graph_trees[root]["porttree"].dbapi
6501                 matches = graph_db.match_pkgs(atom)
6502                 if not matches:
6503                         return None, None
6504                 pkg = matches[-1] # highest match
6505                 in_graph = self._slot_pkg_map[root].get(pkg.slot_atom)
6506                 return pkg, in_graph
6507
6508         def _complete_graph(self):
6509                 """
6510                 Add any deep dependencies of required sets (args, system, world) that
6511                 have not been pulled into the graph yet. This ensures that the graph
6512                 is consistent such that initially satisfied deep dependencies are not
6513                 broken in the new graph. Initially unsatisfied dependencies are
6514                 irrelevant since we only want to avoid breaking dependencies that are
6515                 intially satisfied.
6516
6517                 Since this method can consume enough time to disturb users, it is
6518                 currently only enabled by the --complete-graph option.
6519                 """
6520                 if "--buildpkgonly" in self.myopts or \
6521                         "recurse" not in self.myparams:
6522                         return 1
6523
6524                 if "complete" not in self.myparams:
6525                         # Skip this to avoid consuming enough time to disturb users.
6526                         return 1
6527
6528                 # Put the depgraph into a mode that causes it to only
6529                 # select packages that have already been added to the
6530                 # graph or those that are installed and have not been
6531                 # scheduled for replacement. Also, toggle the "deep"
6532                 # parameter so that all dependencies are traversed and
6533                 # accounted for.
6534                 self._select_atoms = self._select_atoms_from_graph
6535                 self._select_package = self._select_pkg_from_graph
6536                 already_deep = "deep" in self.myparams
6537                 if not already_deep:
6538                         self.myparams.add("deep")
6539
6540                 for root in self.roots:
6541                         required_set_names = self._required_set_names.copy()
6542                         if root == self.target_root and \
6543                                 (already_deep or "empty" in self.myparams):
6544                                 required_set_names.difference_update(self._sets)
6545                         if not required_set_names and not self._ignored_deps:
6546                                 continue
6547                         root_config = self.roots[root]
6548                         setconfig = root_config.setconfig
6549                         args = []
6550                         # Reuse existing SetArg instances when available.
6551                         for arg in self.digraph.root_nodes():
6552                                 if not isinstance(arg, SetArg):
6553                                         continue
6554                                 if arg.root_config != root_config:
6555                                         continue
6556                                 if arg.name in required_set_names:
6557                                         args.append(arg)
6558                                         required_set_names.remove(arg.name)
6559                         # Create new SetArg instances only when necessary.
6560                         for s in required_set_names:
6561                                 expanded_set = InternalPackageSet(
6562                                         initial_atoms=setconfig.getSetAtoms(s))
6563                                 atom = SETPREFIX + s
6564                                 args.append(SetArg(arg=atom, set=expanded_set,
6565                                         root_config=root_config))
6566                         vardb = root_config.trees["vartree"].dbapi
6567                         for arg in args:
6568                                 for atom in arg.set:
6569                                         self._dep_stack.append(
6570                                                 Dependency(atom=atom, root=root, parent=arg))
6571                         if self._ignored_deps:
6572                                 self._dep_stack.extend(self._ignored_deps)
6573                                 self._ignored_deps = []
6574                         if not self._create_graph(allow_unsatisfied=True):
6575                                 return 0
6576                         # Check the unsatisfied deps to see if any initially satisfied deps
6577                         # will become unsatisfied due to an upgrade. Initially unsatisfied
6578                         # deps are irrelevant since we only want to avoid breaking deps
6579                         # that are initially satisfied.
6580                         while self._unsatisfied_deps:
6581                                 dep = self._unsatisfied_deps.pop()
6582                                 matches = vardb.match_pkgs(dep.atom)
6583                                 if not matches:
6584                                         self._initially_unsatisfied_deps.append(dep)
6585                                         continue
6586                                 # An scheduled installation broke a deep dependency.
6587                                 # Add the installed package to the graph so that it
6588                                 # will be appropriately reported as a slot collision
6589                                 # (possibly solvable via backtracking).
6590                                 pkg = matches[-1] # highest match
6591                                 if not self._add_pkg(pkg, dep):
6592                                         return 0
6593                                 if not self._create_graph(allow_unsatisfied=True):
6594                                         return 0
6595                 return 1
6596
6597         def _pkg(self, cpv, type_name, root_config, installed=False):
6598                 """
6599                 Get a package instance from the cache, or create a new
6600                 one if necessary. Raises KeyError from aux_get if it
6601                 failures for some reason (package does not exist or is
6602                 corrupt).
6603                 """
6604                 operation = "merge"
6605                 if installed:
6606                         operation = "nomerge"
6607                 pkg = self._pkg_cache.get(
6608                         (type_name, root_config.root, cpv, operation))
6609                 if pkg is None:
6610                         tree_type = self.pkg_tree_map[type_name]
6611                         db = root_config.trees[tree_type].dbapi
6612                         db_keys = list(self._trees_orig[root_config.root][
6613                                 tree_type].dbapi._aux_cache_keys)
6614                         try:
6615                                 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
6616                         except KeyError:
6617                                 raise portage.exception.PackageNotFound(cpv)
6618                         pkg = Package(cpv=cpv, metadata=metadata,
6619                                 root_config=root_config, installed=installed)
6620                         if type_name == "ebuild":
6621                                 settings = self.pkgsettings[root_config.root]
6622                                 settings.setcpv(pkg)
6623                                 pkg.metadata["USE"] = settings["PORTAGE_USE"]
6624                                 pkg.metadata['CHOST'] = settings.get('CHOST', '')
6625                         self._pkg_cache[pkg] = pkg
6626                 return pkg
6627
6628         def validate_blockers(self):
6629                 """Remove any blockers from the digraph that do not match any of the
6630                 packages within the graph.  If necessary, create hard deps to ensure
6631                 correct merge order such that mutually blocking packages are never
6632                 installed simultaneously."""
6633
6634                 if "--buildpkgonly" in self.myopts or \
6635                         "--nodeps" in self.myopts:
6636                         return True
6637
6638                 #if "deep" in self.myparams:
6639                 if True:
6640                         # Pull in blockers from all installed packages that haven't already
6641                         # been pulled into the depgraph.  This is not enabled by default
6642                         # due to the performance penalty that is incurred by all the
6643                         # additional dep_check calls that are required.
6644
6645                         dep_keys = ["DEPEND","RDEPEND","PDEPEND"]
6646                         for myroot in self.trees:
6647                                 vardb = self.trees[myroot]["vartree"].dbapi
6648                                 portdb = self.trees[myroot]["porttree"].dbapi
6649                                 pkgsettings = self.pkgsettings[myroot]
6650                                 final_db = self.mydbapi[myroot]
6651
6652                                 blocker_cache = BlockerCache(myroot, vardb)
6653                                 stale_cache = set(blocker_cache)
6654                                 for pkg in vardb:
6655                                         cpv = pkg.cpv
6656                                         stale_cache.discard(cpv)
6657                                         pkg_in_graph = self.digraph.contains(pkg)
6658
6659                                         # Check for masked installed packages. Only warn about
6660                                         # packages that are in the graph in order to avoid warning
6661                                         # about those that will be automatically uninstalled during
6662                                         # the merge process or by --depclean.
6663                                         if pkg in final_db:
6664                                                 if pkg_in_graph and not visible(pkgsettings, pkg):
6665                                                         self._masked_installed.add(pkg)
6666
6667                                         blocker_atoms = None
6668                                         blockers = None
6669                                         if pkg_in_graph:
6670                                                 blockers = []
6671                                                 try:
6672                                                         blockers.extend(
6673                                                                 self._blocker_parents.child_nodes(pkg))
6674                                                 except KeyError:
6675                                                         pass
6676                                                 try:
6677                                                         blockers.extend(
6678                                                                 self._irrelevant_blockers.child_nodes(pkg))
6679                                                 except KeyError:
6680                                                         pass
6681                                         if blockers is not None:
6682                                                 blockers = set(str(blocker.atom) \
6683                                                         for blocker in blockers)
6684
6685                                         # If this node has any blockers, create a "nomerge"
6686                                         # node for it so that they can be enforced.
6687                                         self.spinner.update()
6688                                         blocker_data = blocker_cache.get(cpv)
6689                                         if blocker_data is not None and \
6690                                                 blocker_data.counter != long(pkg.metadata["COUNTER"]):
6691                                                 blocker_data = None
6692
6693                                         # If blocker data from the graph is available, use
6694                                         # it to validate the cache and update the cache if
6695                                         # it seems invalid.
6696                                         if blocker_data is not None and \
6697                                                 blockers is not None:
6698                                                 if not blockers.symmetric_difference(
6699                                                         blocker_data.atoms):
6700                                                         continue
6701                                                 blocker_data = None
6702
6703                                         if blocker_data is None and \
6704                                                 blockers is not None:
6705                                                 # Re-use the blockers from the graph.
6706                                                 blocker_atoms = sorted(blockers)
6707                                                 counter = long(pkg.metadata["COUNTER"])
6708                                                 blocker_data = \
6709                                                         blocker_cache.BlockerData(counter, blocker_atoms)
6710                                                 blocker_cache[pkg.cpv] = blocker_data
6711                                                 continue
6712
6713                                         if blocker_data:
6714                                                 blocker_atoms = blocker_data.atoms
6715                                         else:
6716                                                 # Use aux_get() to trigger FakeVartree global
6717                                                 # updates on *DEPEND when appropriate.
6718                                                 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
6719                                                 # It is crucial to pass in final_db here in order to
6720                                                 # optimize dep_check calls by eliminating atoms via
6721                                                 # dep_wordreduce and dep_eval calls.
6722                                                 try:
6723                                                         portage.dep._dep_check_strict = False
6724                                                         try:
6725                                                                 success, atoms = portage.dep_check(depstr,
6726                                                                         final_db, pkgsettings, myuse=pkg.use.enabled,
6727                                                                         trees=self._graph_trees, myroot=myroot)
6728                                                         except Exception, e:
6729                                                                 if isinstance(e, SystemExit):
6730                                                                         raise
6731                                                                 # This is helpful, for example, if a ValueError
6732                                                                 # is thrown from cpv_expand due to multiple
6733                                                                 # matches (this can happen if an atom lacks a
6734                                                                 # category).
6735                                                                 show_invalid_depstring_notice(
6736                                                                         pkg, depstr, str(e))
6737                                                                 del e
6738                                                                 raise
6739                                                 finally:
6740                                                         portage.dep._dep_check_strict = True
6741                                                 if not success:
6742                                                         replacement_pkg = final_db.match_pkgs(pkg.slot_atom)
6743                                                         if replacement_pkg and \
6744                                                                 replacement_pkg[0].operation == "merge":
6745                                                                 # This package is being replaced anyway, so
6746                                                                 # ignore invalid dependencies so as not to
6747                                                                 # annoy the user too much (otherwise they'd be
6748                                                                 # forced to manually unmerge it first).
6749                                                                 continue
6750                                                         show_invalid_depstring_notice(pkg, depstr, atoms)
6751                                                         return False
6752                                                 blocker_atoms = [myatom for myatom in atoms \
6753                                                         if myatom.startswith("!")]
6754                                                 blocker_atoms.sort()
6755                                                 counter = long(pkg.metadata["COUNTER"])
6756                                                 blocker_cache[cpv] = \
6757                                                         blocker_cache.BlockerData(counter, blocker_atoms)
6758                                         if blocker_atoms:
6759                                                 try:
6760                                                         for atom in blocker_atoms:
6761                                                                 blocker = Blocker(atom=portage.dep.Atom(atom),
6762                                                                         eapi=pkg.metadata["EAPI"], root=myroot)
6763                                                                 self._blocker_parents.add(blocker, pkg)
6764                                                 except portage.exception.InvalidAtom, e:
6765                                                         depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
6766                                                         show_invalid_depstring_notice(
6767                                                                 pkg, depstr, "Invalid Atom: %s" % (e,))
6768                                                         return False
6769                                 for cpv in stale_cache:
6770                                         del blocker_cache[cpv]
6771                                 blocker_cache.flush()
6772                                 del blocker_cache
6773
6774                 # Discard any "uninstall" tasks scheduled by previous calls
6775                 # to this method, since those tasks may not make sense given
6776                 # the current graph state.
6777                 previous_uninstall_tasks = self._blocker_uninstalls.leaf_nodes()
6778                 if previous_uninstall_tasks:
6779                         self._blocker_uninstalls = digraph()
6780                         self.digraph.difference_update(previous_uninstall_tasks)
6781
6782                 for blocker in self._blocker_parents.leaf_nodes():
6783                         self.spinner.update()
6784                         root_config = self.roots[blocker.root]
6785                         virtuals = root_config.settings.getvirtuals()
6786                         myroot = blocker.root
6787                         initial_db = self.trees[myroot]["vartree"].dbapi
6788                         final_db = self.mydbapi[myroot]
6789                         
6790                         provider_virtual = False
6791                         if blocker.cp in virtuals and \
6792                                 not self._have_new_virt(blocker.root, blocker.cp):
6793                                 provider_virtual = True
6794
6795                         # Use this to check PROVIDE for each matched package
6796                         # when necessary.
6797                         atom_set = InternalPackageSet(
6798                                 initial_atoms=[blocker.atom])
6799
6800                         if provider_virtual:
6801                                 atoms = []
6802                                 for provider_entry in virtuals[blocker.cp]:
6803                                         provider_cp = \
6804                                                 portage.dep_getkey(provider_entry)
6805                                         atoms.append(blocker.atom.replace(
6806                                                 blocker.cp, provider_cp))
6807                         else:
6808                                 atoms = [blocker.atom]
6809
6810                         blocked_initial = set()
6811                         for atom in atoms:
6812                                 for pkg in initial_db.match_pkgs(atom):
6813                                         if atom_set.findAtomForPackage(pkg):
6814                                                 blocked_initial.add(pkg)
6815
6816                         blocked_final = set()
6817                         for atom in atoms:
6818                                 for pkg in final_db.match_pkgs(atom):
6819                                         if atom_set.findAtomForPackage(pkg):
6820                                                 blocked_final.add(pkg)
6821
6822                         if not blocked_initial and not blocked_final:
6823                                 parent_pkgs = self._blocker_parents.parent_nodes(blocker)
6824                                 self._blocker_parents.remove(blocker)
6825                                 # Discard any parents that don't have any more blockers.
6826                                 for pkg in parent_pkgs:
6827                                         self._irrelevant_blockers.add(blocker, pkg)
6828                                         if not self._blocker_parents.child_nodes(pkg):
6829                                                 self._blocker_parents.remove(pkg)
6830                                 continue
6831                         for parent in self._blocker_parents.parent_nodes(blocker):
6832                                 unresolved_blocks = False
6833                                 depends_on_order = set()
6834                                 for pkg in blocked_initial:
6835                                         if pkg.slot_atom == parent.slot_atom:
6836                                                 # TODO: Support blocks within slots in cases where it
6837                                                 # might make sense.  For example, a new version might
6838                                                 # require that the old version be uninstalled at build
6839                                                 # time.
6840                                                 continue
6841                                         if parent.installed:
6842                                                 # Two currently installed packages conflict with
6843                                                 # eachother. Ignore this case since the damage
6844                                                 # is already done and this would be likely to
6845                                                 # confuse users if displayed like a normal blocker.
6846                                                 continue
6847
6848                                         self._blocked_pkgs.add(pkg, blocker)
6849
6850                                         if parent.operation == "merge":
6851                                                 # Maybe the blocked package can be replaced or simply
6852                                                 # unmerged to resolve this block.
6853                                                 depends_on_order.add((pkg, parent))
6854                                                 continue
6855                                         # None of the above blocker resolutions techniques apply,
6856                                         # so apparently this one is unresolvable.
6857                                         unresolved_blocks = True
6858                                 for pkg in blocked_final:
6859                                         if pkg.slot_atom == parent.slot_atom:
6860                                                 # TODO: Support blocks within slots.
6861                                                 continue
6862                                         if parent.operation == "nomerge" and \
6863                                                 pkg.operation == "nomerge":
6864                                                 # This blocker will be handled the next time that a
6865                                                 # merge of either package is triggered.
6866                                                 continue
6867
6868                                         self._blocked_pkgs.add(pkg, blocker)
6869
6870                                         # Maybe the blocking package can be
6871                                         # unmerged to resolve this block.
6872                                         if parent.operation == "merge" and pkg.installed:
6873                                                 depends_on_order.add((pkg, parent))
6874                                                 continue
6875                                         elif parent.operation == "nomerge":
6876                                                 depends_on_order.add((parent, pkg))
6877                                                 continue
6878                                         # None of the above blocker resolutions techniques apply,
6879                                         # so apparently this one is unresolvable.
6880                                         unresolved_blocks = True
6881
6882                                 # Make sure we don't unmerge any package that have been pulled
6883                                 # into the graph.
6884                                 if not unresolved_blocks and depends_on_order:
6885                                         for inst_pkg, inst_task in depends_on_order:
6886                                                 if self.digraph.contains(inst_pkg) and \
6887                                                         self.digraph.parent_nodes(inst_pkg):
6888                                                         unresolved_blocks = True
6889                                                         break
6890
6891                                 if not unresolved_blocks and depends_on_order:
6892                                         for inst_pkg, inst_task in depends_on_order:
6893                                                 uninst_task = Package(built=inst_pkg.built,
6894                                                         cpv=inst_pkg.cpv, installed=inst_pkg.installed,
6895                                                         metadata=inst_pkg.metadata,
6896                                                         operation="uninstall",
6897                                                         root_config=inst_pkg.root_config,
6898                                                         type_name=inst_pkg.type_name)
6899                                                 self._pkg_cache[uninst_task] = uninst_task
6900                                                 # Enforce correct merge order with a hard dep.
6901                                                 self.digraph.addnode(uninst_task, inst_task,
6902                                                         priority=BlockerDepPriority.instance)
6903                                                 # Count references to this blocker so that it can be
6904                                                 # invalidated after nodes referencing it have been
6905                                                 # merged.
6906                                                 self._blocker_uninstalls.addnode(uninst_task, blocker)
6907                                 if not unresolved_blocks and not depends_on_order:
6908                                         self._irrelevant_blockers.add(blocker, parent)
6909                                         self._blocker_parents.remove_edge(blocker, parent)
6910                                         if not self._blocker_parents.parent_nodes(blocker):
6911                                                 self._blocker_parents.remove(blocker)
6912                                         if not self._blocker_parents.child_nodes(parent):
6913                                                 self._blocker_parents.remove(parent)
6914                                 if unresolved_blocks:
6915                                         self._unsolvable_blockers.add(blocker, parent)
6916
6917                 return True
6918
6919         def _accept_blocker_conflicts(self):
6920                 acceptable = False
6921                 for x in ("--buildpkgonly", "--fetchonly",
6922                         "--fetch-all-uri", "--nodeps"):
6923                         if x in self.myopts:
6924                                 acceptable = True
6925                                 break
6926                 return acceptable
6927
6928         def _merge_order_bias(self, mygraph):
6929                 """
6930                 For optimal leaf node selection, promote deep system runtime deps and
6931                 order nodes from highest to lowest overall reference count.
6932                 """
6933
6934                 node_info = {}
6935                 for node in mygraph.order:
6936                         node_info[node] = len(mygraph.parent_nodes(node))
6937                 deep_system_deps = _find_deep_system_runtime_deps(mygraph)
6938
6939                 def cmp_merge_preference(node1, node2):
6940
6941                         if node1.operation == 'uninstall':
6942                                 if node2.operation == 'uninstall':
6943                                         return 0
6944                                 return 1
6945
6946                         if node2.operation == 'uninstall':
6947                                 if node1.operation == 'uninstall':
6948                                         return 0
6949                                 return -1
6950
6951                         node1_sys = node1 in deep_system_deps
6952                         node2_sys = node2 in deep_system_deps
6953                         if node1_sys != node2_sys:
6954                                 if node1_sys:
6955                                         return -1
6956                                 return 1
6957
6958                         return node_info[node2] - node_info[node1]
6959
6960                 mygraph.order.sort(key=cmp_sort_key(cmp_merge_preference))
6961
6962         def altlist(self, reversed=False):
6963
6964                 while self._serialized_tasks_cache is None:
6965                         self._resolve_conflicts()
6966                         try:
6967                                 self._serialized_tasks_cache, self._scheduler_graph = \
6968                                         self._serialize_tasks()
6969                         except self._serialize_tasks_retry:
6970                                 pass
6971
6972                 retlist = self._serialized_tasks_cache[:]
6973                 if reversed:
6974                         retlist.reverse()
6975                 return retlist
6976
6977         def schedulerGraph(self):
6978                 """
6979                 The scheduler graph is identical to the normal one except that
6980                 uninstall edges are reversed in specific cases that require
6981                 conflicting packages to be temporarily installed simultaneously.
6982                 This is intended for use by the Scheduler in it's parallelization
6983                 logic. It ensures that temporary simultaneous installation of
6984                 conflicting packages is avoided when appropriate (especially for
6985                 !!atom blockers), but allowed in specific cases that require it.
6986
6987                 Note that this method calls break_refs() which alters the state of
6988                 internal Package instances such that this depgraph instance should
6989                 not be used to perform any more calculations.
6990                 """
6991                 if self._scheduler_graph is None:
6992                         self.altlist()
6993                 self.break_refs(self._scheduler_graph.order)
6994                 return self._scheduler_graph
6995
6996         def break_refs(self, nodes):
6997                 """
6998                 Take a mergelist like that returned from self.altlist() and
6999                 break any references that lead back to the depgraph. This is
7000                 useful if you want to hold references to packages without
7001                 also holding the depgraph on the heap.
7002                 """
7003                 for node in nodes:
7004                         if hasattr(node, "root_config"):
7005                                 # The FakeVartree references the _package_cache which
7006                                 # references the depgraph. So that Package instances don't
7007                                 # hold the depgraph and FakeVartree on the heap, replace
7008                                 # the RootConfig that references the FakeVartree with the
7009                                 # original RootConfig instance which references the actual
7010                                 # vartree.
7011                                 node.root_config = \
7012                                         self._trees_orig[node.root_config.root]["root_config"]
7013
7014         def _resolve_conflicts(self):
7015                 if not self._complete_graph():
7016                         raise self._unknown_internal_error()
7017
7018                 if not self.validate_blockers():
7019                         raise self._unknown_internal_error()
7020
7021                 if self._slot_collision_info:
7022                         self._process_slot_conflicts()
7023
7024         def _serialize_tasks(self):
7025
7026                 if "--debug" in self.myopts:
7027                         writemsg("\ndigraph:\n\n", noiselevel=-1)
7028                         self.digraph.debug_print()
7029                         writemsg("\n", noiselevel=-1)
7030
7031                 scheduler_graph = self.digraph.copy()
7032                 mygraph=self.digraph.copy()
7033                 # Prune "nomerge" root nodes if nothing depends on them, since
7034                 # otherwise they slow down merge order calculation. Don't remove
7035                 # non-root nodes since they help optimize merge order in some cases
7036                 # such as revdep-rebuild.
7037                 removed_nodes = set()
7038                 while True:
7039                         for node in mygraph.root_nodes():
7040                                 if not isinstance(node, Package) or \
7041                                         node.installed or node.onlydeps:
7042                                         removed_nodes.add(node)
7043                         if removed_nodes:
7044                                 self.spinner.update()
7045                                 mygraph.difference_update(removed_nodes)
7046                         if not removed_nodes:
7047                                 break
7048                         removed_nodes.clear()
7049                 self._merge_order_bias(mygraph)
7050                 def cmp_circular_bias(n1, n2):
7051                         """
7052                         RDEPEND is stronger than PDEPEND and this function
7053                         measures such a strength bias within a circular
7054                         dependency relationship.
7055                         """
7056                         n1_n2_medium = n2 in mygraph.child_nodes(n1,
7057                                 ignore_priority=priority_range.ignore_medium_soft)
7058                         n2_n1_medium = n1 in mygraph.child_nodes(n2,
7059                                 ignore_priority=priority_range.ignore_medium_soft)
7060                         if n1_n2_medium == n2_n1_medium:
7061                                 return 0
7062                         elif n1_n2_medium:
7063                                 return 1
7064                         return -1
7065                 myblocker_uninstalls = self._blocker_uninstalls.copy()
7066                 retlist=[]
7067                 # Contains uninstall tasks that have been scheduled to
7068                 # occur after overlapping blockers have been installed.
7069                 scheduled_uninstalls = set()
7070                 # Contains any Uninstall tasks that have been ignored
7071                 # in order to avoid the circular deps code path. These
7072                 # correspond to blocker conflicts that could not be
7073                 # resolved.
7074                 ignored_uninstall_tasks = set()
7075                 have_uninstall_task = False
7076                 complete = "complete" in self.myparams
7077                 asap_nodes = []
7078
7079                 def get_nodes(**kwargs):
7080                         """
7081                         Returns leaf nodes excluding Uninstall instances
7082                         since those should be executed as late as possible.
7083                         """
7084                         return [node for node in mygraph.leaf_nodes(**kwargs) \
7085                                 if isinstance(node, Package) and \
7086                                         (node.operation != "uninstall" or \
7087                                         node in scheduled_uninstalls)]
7088
7089                 # sys-apps/portage needs special treatment if ROOT="/"
7090                 running_root = self._running_root.root
7091                 from portage.const import PORTAGE_PACKAGE_ATOM
7092                 runtime_deps = InternalPackageSet(
7093                         initial_atoms=[PORTAGE_PACKAGE_ATOM])
7094                 running_portage = self.trees[running_root]["vartree"].dbapi.match_pkgs(
7095                         PORTAGE_PACKAGE_ATOM)
7096                 replacement_portage = self.mydbapi[running_root].match_pkgs(
7097                         PORTAGE_PACKAGE_ATOM)
7098
7099                 if running_portage:
7100                         running_portage = running_portage[0]
7101                 else:
7102                         running_portage = None
7103
7104                 if replacement_portage:
7105                         replacement_portage = replacement_portage[0]
7106                 else:
7107                         replacement_portage = None
7108
7109                 if replacement_portage == running_portage:
7110                         replacement_portage = None
7111
7112                 if replacement_portage is not None:
7113                         # update from running_portage to replacement_portage asap
7114                         asap_nodes.append(replacement_portage)
7115
7116                 if running_portage is not None:
7117                         try:
7118                                 portage_rdepend = self._select_atoms_highest_available(
7119                                         running_root, running_portage.metadata["RDEPEND"],
7120                                         myuse=running_portage.use.enabled,
7121                                         parent=running_portage, strict=False)
7122                         except portage.exception.InvalidDependString, e:
7123                                 portage.writemsg("!!! Invalid RDEPEND in " + \
7124                                         "'%svar/db/pkg/%s/RDEPEND': %s\n" % \
7125                                         (running_root, running_portage.cpv, e), noiselevel=-1)
7126                                 del e
7127                                 portage_rdepend = []
7128                         runtime_deps.update(atom for atom in portage_rdepend \
7129                                 if not atom.startswith("!"))
7130
7131                 def gather_deps(ignore_priority, mergeable_nodes,
7132                         selected_nodes, node):
7133                         """
7134                         Recursively gather a group of nodes that RDEPEND on
7135                         eachother. This ensures that they are merged as a group
7136                         and get their RDEPENDs satisfied as soon as possible.
7137                         """
7138                         if node in selected_nodes:
7139                                 return True
7140                         if node not in mergeable_nodes:
7141                                 return False
7142                         if node == replacement_portage and \
7143                                 mygraph.child_nodes(node,
7144                                 ignore_priority=priority_range.ignore_medium_soft):
7145                                 # Make sure that portage always has all of it's
7146                                 # RDEPENDs installed first.
7147                                 return False
7148                         selected_nodes.add(node)
7149                         for child in mygraph.child_nodes(node,
7150                                 ignore_priority=ignore_priority):
7151                                 if not gather_deps(ignore_priority,
7152                                         mergeable_nodes, selected_nodes, child):
7153                                         return False
7154                         return True
7155
7156                 def ignore_uninst_or_med(priority):
7157                         if priority is BlockerDepPriority.instance:
7158                                 return True
7159                         return priority_range.ignore_medium(priority)
7160
7161                 def ignore_uninst_or_med_soft(priority):
7162                         if priority is BlockerDepPriority.instance:
7163                                 return True
7164                         return priority_range.ignore_medium_soft(priority)
7165
7166                 tree_mode = "--tree" in self.myopts
7167                 # Tracks whether or not the current iteration should prefer asap_nodes
7168                 # if available.  This is set to False when the previous iteration
7169                 # failed to select any nodes.  It is reset whenever nodes are
7170                 # successfully selected.
7171                 prefer_asap = True
7172
7173                 # Controls whether or not the current iteration should drop edges that
7174                 # are "satisfied" by installed packages, in order to solve circular
7175                 # dependencies. The deep runtime dependencies of installed packages are
7176                 # not checked in this case (bug #199856), so it must be avoided
7177                 # whenever possible.
7178                 drop_satisfied = False
7179
7180                 # State of variables for successive iterations that loosen the
7181                 # criteria for node selection.
7182                 #
7183                 # iteration   prefer_asap   drop_satisfied
7184                 # 1           True          False
7185                 # 2           False         False
7186                 # 3           False         True
7187                 #
7188                 # If no nodes are selected on the last iteration, it is due to
7189                 # unresolved blockers or circular dependencies.
7190
7191                 while not mygraph.empty():
7192                         self.spinner.update()
7193                         selected_nodes = None
7194                         ignore_priority = None
7195                         if drop_satisfied or (prefer_asap and asap_nodes):
7196                                 priority_range = DepPrioritySatisfiedRange
7197                         else:
7198                                 priority_range = DepPriorityNormalRange
7199                         if prefer_asap and asap_nodes:
7200                                 # ASAP nodes are merged before their soft deps. Go ahead and
7201                                 # select root nodes here if necessary, since it's typical for
7202                                 # the parent to have been removed from the graph already.
7203                                 asap_nodes = [node for node in asap_nodes \
7204                                         if mygraph.contains(node)]
7205                                 for node in asap_nodes:
7206                                         if not mygraph.child_nodes(node,
7207                                                 ignore_priority=priority_range.ignore_soft):
7208                                                 selected_nodes = [node]
7209                                                 asap_nodes.remove(node)
7210                                                 break
7211                         if not selected_nodes and \
7212                                 not (prefer_asap and asap_nodes):
7213                                 for i in xrange(priority_range.NONE,
7214                                         priority_range.MEDIUM_SOFT + 1):
7215                                         ignore_priority = priority_range.ignore_priority[i]
7216                                         nodes = get_nodes(ignore_priority=ignore_priority)
7217                                         if nodes:
7218                                                 # If there is a mix of uninstall nodes with other
7219                                                 # types, save the uninstall nodes for later since
7220                                                 # sometimes a merge node will render an uninstall
7221                                                 # node unnecessary (due to occupying the same slot),
7222                                                 # and we want to avoid executing a separate uninstall
7223                                                 # task in that case.
7224                                                 if len(nodes) > 1:
7225                                                         good_uninstalls = []
7226                                                         with_some_uninstalls_excluded = []
7227                                                         for node in nodes:
7228                                                                 if node.operation == "uninstall":
7229                                                                         slot_node = self.mydbapi[node.root
7230                                                                                 ].match_pkgs(node.slot_atom)
7231                                                                         if slot_node and \
7232                                                                                 slot_node[0].operation == "merge":
7233                                                                                 continue
7234                                                                         good_uninstalls.append(node)
7235                                                                 with_some_uninstalls_excluded.append(node)
7236                                                         if good_uninstalls:
7237                                                                 nodes = good_uninstalls
7238                                                         elif with_some_uninstalls_excluded:
7239                                                                 nodes = with_some_uninstalls_excluded
7240                                                         else:
7241                                                                 nodes = nodes
7242
7243                                                 if ignore_priority is None and not tree_mode:
7244                                                         # Greedily pop all of these nodes since no
7245                                                         # relationship has been ignored. This optimization
7246                                                         # destroys --tree output, so it's disabled in tree
7247                                                         # mode.
7248                                                         selected_nodes = nodes
7249                                                 else:
7250                                                         # For optimal merge order:
7251                                                         #  * Only pop one node.
7252                                                         #  * Removing a root node (node without a parent)
7253                                                         #    will not produce a leaf node, so avoid it.
7254                                                         #  * It's normal for a selected uninstall to be a
7255                                                         #    root node, so don't check them for parents.
7256                                                         for node in nodes:
7257                                                                 if node.operation == "uninstall" or \
7258                                                                         mygraph.parent_nodes(node):
7259                                                                         selected_nodes = [node]
7260                                                                         break
7261
7262                                                 if selected_nodes:
7263                                                         break
7264
7265                         if not selected_nodes:
7266                                 nodes = get_nodes(ignore_priority=priority_range.ignore_medium)
7267                                 if nodes:
7268                                         mergeable_nodes = set(nodes)
7269                                         if prefer_asap and asap_nodes:
7270                                                 nodes = asap_nodes
7271                                         for i in xrange(priority_range.SOFT,
7272                                                 priority_range.MEDIUM_SOFT + 1):
7273                                                 ignore_priority = priority_range.ignore_priority[i]
7274                                                 for node in nodes:
7275                                                         if not mygraph.parent_nodes(node):
7276                                                                 continue
7277                                                         selected_nodes = set()
7278                                                         if gather_deps(ignore_priority,
7279                                                                 mergeable_nodes, selected_nodes, node):
7280                                                                 break
7281                                                         else:
7282                                                                 selected_nodes = None
7283                                                 if selected_nodes:
7284                                                         break
7285
7286                                         if prefer_asap and asap_nodes and not selected_nodes:
7287                                                 # We failed to find any asap nodes to merge, so ignore
7288                                                 # them for the next iteration.
7289                                                 prefer_asap = False
7290                                                 continue
7291
7292                         if selected_nodes and ignore_priority is not None:
7293                                 # Try to merge ignored medium_soft deps as soon as possible
7294                                 # if they're not satisfied by installed packages.
7295                                 for node in selected_nodes:
7296                                         children = set(mygraph.child_nodes(node))
7297                                         soft = children.difference(
7298                                                 mygraph.child_nodes(node,
7299                                                 ignore_priority=DepPrioritySatisfiedRange.ignore_soft))
7300                                         medium_soft = children.difference(
7301                                                 mygraph.child_nodes(node,
7302                                                         ignore_priority = \
7303                                                         DepPrioritySatisfiedRange.ignore_medium_soft))
7304                                         medium_soft.difference_update(soft)
7305                                         for child in medium_soft:
7306                                                 if child in selected_nodes:
7307                                                         continue
7308                                                 if child in asap_nodes:
7309                                                         continue
7310                                                 asap_nodes.append(child)
7311
7312                         if selected_nodes and len(selected_nodes) > 1:
7313                                 if not isinstance(selected_nodes, list):
7314                                         selected_nodes = list(selected_nodes)
7315                                 selected_nodes.sort(key=cmp_sort_key(cmp_circular_bias))
7316
7317                         if not selected_nodes and not myblocker_uninstalls.is_empty():
7318                                 # An Uninstall task needs to be executed in order to
7319                                 # avoid conflict if possible.
7320
7321                                 if drop_satisfied:
7322                                         priority_range = DepPrioritySatisfiedRange
7323                                 else:
7324                                         priority_range = DepPriorityNormalRange
7325
7326                                 mergeable_nodes = get_nodes(
7327                                         ignore_priority=ignore_uninst_or_med)
7328
7329                                 min_parent_deps = None
7330                                 uninst_task = None
7331                                 for task in myblocker_uninstalls.leaf_nodes():
7332                                         # Do some sanity checks so that system or world packages
7333                                         # don't get uninstalled inappropriately here (only really
7334                                         # necessary when --complete-graph has not been enabled).
7335
7336                                         if task in ignored_uninstall_tasks:
7337                                                 continue
7338
7339                                         if task in scheduled_uninstalls:
7340                                                 # It's been scheduled but it hasn't
7341                                                 # been executed yet due to dependence
7342                                                 # on installation of blocking packages.
7343                                                 continue
7344
7345                                         root_config = self.roots[task.root]
7346                                         inst_pkg = self._pkg_cache[
7347                                                 ("installed", task.root, task.cpv, "nomerge")]
7348
7349                                         if self.digraph.contains(inst_pkg):
7350                                                 continue
7351
7352                                         forbid_overlap = False
7353                                         heuristic_overlap = False
7354                                         for blocker in myblocker_uninstalls.parent_nodes(task):
7355                                                 if blocker.eapi in ("0", "1"):
7356                                                         heuristic_overlap = True
7357                                                 elif blocker.atom.blocker.overlap.forbid:
7358                                                         forbid_overlap = True
7359                                                         break
7360                                         if forbid_overlap and running_root == task.root:
7361                                                 continue
7362
7363                                         if heuristic_overlap and running_root == task.root:
7364                                                 # Never uninstall sys-apps/portage or it's essential
7365                                                 # dependencies, except through replacement.
7366                                                 try:
7367                                                         runtime_dep_atoms = \
7368                                                                 list(runtime_deps.iterAtomsForPackage(task))
7369                                                 except portage.exception.InvalidDependString, e:
7370                                                         portage.writemsg("!!! Invalid PROVIDE in " + \
7371                                                                 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
7372                                                                 (task.root, task.cpv, e), noiselevel=-1)
7373                                                         del e
7374                                                         continue
7375
7376                                                 # Don't uninstall a runtime dep if it appears
7377                                                 # to be the only suitable one installed.
7378                                                 skip = False
7379                                                 vardb = root_config.trees["vartree"].dbapi
7380                                                 for atom in runtime_dep_atoms:
7381                                                         other_version = None
7382                                                         for pkg in vardb.match_pkgs(atom):
7383                                                                 if pkg.cpv == task.cpv and \
7384                                                                         pkg.metadata["COUNTER"] == \
7385                                                                         task.metadata["COUNTER"]:
7386                                                                         continue
7387                                                                 other_version = pkg
7388                                                                 break
7389                                                         if other_version is None:
7390                                                                 skip = True
7391                                                                 break
7392                                                 if skip:
7393                                                         continue
7394
7395                                                 # For packages in the system set, don't take
7396                                                 # any chances. If the conflict can't be resolved
7397                                                 # by a normal replacement operation then abort.
7398                                                 skip = False
7399                                                 try:
7400                                                         for atom in root_config.sets[
7401                                                                 "system"].iterAtomsForPackage(task):
7402                                                                 skip = True
7403                                                                 break
7404                                                 except portage.exception.InvalidDependString, e:
7405                                                         portage.writemsg("!!! Invalid PROVIDE in " + \
7406                                                                 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
7407                                                                 (task.root, task.cpv, e), noiselevel=-1)
7408                                                         del e
7409                                                         skip = True
7410                                                 if skip:
7411                                                         continue
7412
7413                                         # Note that the world check isn't always
7414                                         # necessary since self._complete_graph() will
7415                                         # add all packages from the system and world sets to the
7416                                         # graph. This just allows unresolved conflicts to be
7417                                         # detected as early as possible, which makes it possible
7418                                         # to avoid calling self._complete_graph() when it is
7419                                         # unnecessary due to blockers triggering an abortion.
7420                                         if not complete:
7421                                                 # For packages in the world set, go ahead an uninstall
7422                                                 # when necessary, as long as the atom will be satisfied
7423                                                 # in the final state.
7424                                                 graph_db = self.mydbapi[task.root]
7425                                                 skip = False
7426                                                 try:
7427                                                         for atom in root_config.sets[
7428                                                                 "world"].iterAtomsForPackage(task):
7429                                                                 satisfied = False
7430                                                                 for pkg in graph_db.match_pkgs(atom):
7431                                                                         if pkg == inst_pkg:
7432                                                                                 continue
7433                                                                         satisfied = True
7434                                                                         break
7435                                                                 if not satisfied:
7436                                                                         skip = True
7437                                                                         self._blocked_world_pkgs[inst_pkg] = atom
7438                                                                         break
7439                                                 except portage.exception.InvalidDependString, e:
7440                                                         portage.writemsg("!!! Invalid PROVIDE in " + \
7441                                                                 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
7442                                                                 (task.root, task.cpv, e), noiselevel=-1)
7443                                                         del e
7444                                                         skip = True
7445                                                 if skip:
7446                                                         continue
7447
7448                                         # Check the deps of parent nodes to ensure that
7449                                         # the chosen task produces a leaf node. Maybe
7450                                         # this can be optimized some more to make the
7451                                         # best possible choice, but the current algorithm
7452                                         # is simple and should be near optimal for most
7453                                         # common cases.
7454                                         mergeable_parent = False
7455                                         parent_deps = set()
7456                                         for parent in mygraph.parent_nodes(task):
7457                                                 parent_deps.update(mygraph.child_nodes(parent,
7458                                                         ignore_priority=priority_range.ignore_medium_soft))
7459                                                 if parent in mergeable_nodes and \
7460                                                         gather_deps(ignore_uninst_or_med_soft,
7461                                                         mergeable_nodes, set(), parent):
7462                                                         mergeable_parent = True
7463
7464                                         if not mergeable_parent:
7465                                                 continue
7466
7467                                         parent_deps.remove(task)
7468                                         if min_parent_deps is None or \
7469                                                 len(parent_deps) < min_parent_deps:
7470                                                 min_parent_deps = len(parent_deps)
7471                                                 uninst_task = task
7472
7473                                 if uninst_task is not None:
7474                                         # The uninstall is performed only after blocking
7475                                         # packages have been merged on top of it. File
7476                                         # collisions between blocking packages are detected
7477                                         # and removed from the list of files to be uninstalled.
7478                                         scheduled_uninstalls.add(uninst_task)
7479                                         parent_nodes = mygraph.parent_nodes(uninst_task)
7480
7481                                         # Reverse the parent -> uninstall edges since we want
7482                                         # to do the uninstall after blocking packages have
7483                                         # been merged on top of it.
7484                                         mygraph.remove(uninst_task)
7485                                         for blocked_pkg in parent_nodes:
7486                                                 mygraph.add(blocked_pkg, uninst_task,
7487                                                         priority=BlockerDepPriority.instance)
7488                                                 scheduler_graph.remove_edge(uninst_task, blocked_pkg)
7489                                                 scheduler_graph.add(blocked_pkg, uninst_task,
7490                                                         priority=BlockerDepPriority.instance)
7491
7492                                         # Reset the state variables for leaf node selection and
7493                                         # continue trying to select leaf nodes.
7494                                         prefer_asap = True
7495                                         drop_satisfied = False
7496                                         continue
7497
7498                         if not selected_nodes:
7499                                 # Only select root nodes as a last resort. This case should
7500                                 # only trigger when the graph is nearly empty and the only
7501                                 # remaining nodes are isolated (no parents or children). Since
7502                                 # the nodes must be isolated, ignore_priority is not needed.
7503                                 selected_nodes = get_nodes()
7504
7505                         if not selected_nodes and not drop_satisfied:
7506                                 drop_satisfied = True
7507                                 continue
7508
7509                         if not selected_nodes and not myblocker_uninstalls.is_empty():
7510                                 # If possible, drop an uninstall task here in order to avoid
7511                                 # the circular deps code path. The corresponding blocker will
7512                                 # still be counted as an unresolved conflict.
7513                                 uninst_task = None
7514                                 for node in myblocker_uninstalls.leaf_nodes():
7515                                         try:
7516                                                 mygraph.remove(node)
7517                                         except KeyError:
7518                                                 pass
7519                                         else:
7520                                                 uninst_task = node
7521                                                 ignored_uninstall_tasks.add(node)
7522                                                 break
7523
7524                                 if uninst_task is not None:
7525                                         # Reset the state variables for leaf node selection and
7526                                         # continue trying to select leaf nodes.
7527                                         prefer_asap = True
7528                                         drop_satisfied = False
7529                                         continue
7530
7531                         if not selected_nodes:
7532                                 self._circular_deps_for_display = mygraph
7533                                 raise self._unknown_internal_error()
7534
7535                         # At this point, we've succeeded in selecting one or more nodes, so
7536                         # reset state variables for leaf node selection.
7537                         prefer_asap = True
7538                         drop_satisfied = False
7539
7540                         mygraph.difference_update(selected_nodes)
7541
7542                         for node in selected_nodes:
7543                                 if isinstance(node, Package) and \
7544                                         node.operation == "nomerge":
7545                                         continue
7546
7547                                 # Handle interactions between blockers
7548                                 # and uninstallation tasks.
7549                                 solved_blockers = set()
7550                                 uninst_task = None
7551                                 if isinstance(node, Package) and \
7552                                         "uninstall" == node.operation:
7553                                         have_uninstall_task = True
7554                                         uninst_task = node
7555                                 else:
7556                                         vardb = self.trees[node.root]["vartree"].dbapi
7557                                         previous_cpv = vardb.match(node.slot_atom)
7558                                         if previous_cpv:
7559                                                 # The package will be replaced by this one, so remove
7560                                                 # the corresponding Uninstall task if necessary.
7561                                                 previous_cpv = previous_cpv[0]
7562                                                 uninst_task = \
7563                                                         ("installed", node.root, previous_cpv, "uninstall")
7564                                                 try:
7565                                                         mygraph.remove(uninst_task)
7566                                                 except KeyError:
7567                                                         pass
7568
7569                                 if uninst_task is not None and \
7570                                         uninst_task not in ignored_uninstall_tasks and \
7571                                         myblocker_uninstalls.contains(uninst_task):
7572                                         blocker_nodes = myblocker_uninstalls.parent_nodes(uninst_task)
7573                                         myblocker_uninstalls.remove(uninst_task)
7574                                         # Discard any blockers that this Uninstall solves.
7575                                         for blocker in blocker_nodes:
7576                                                 if not myblocker_uninstalls.child_nodes(blocker):
7577                                                         myblocker_uninstalls.remove(blocker)
7578                                                         solved_blockers.add(blocker)
7579
7580                                 retlist.append(node)
7581
7582                                 if (isinstance(node, Package) and \
7583                                         "uninstall" == node.operation) or \
7584                                         (uninst_task is not None and \
7585                                         uninst_task in scheduled_uninstalls):
7586                                         # Include satisfied blockers in the merge list
7587                                         # since the user might be interested and also
7588                                         # it serves as an indicator that blocking packages
7589                                         # will be temporarily installed simultaneously.
7590                                         for blocker in solved_blockers:
7591                                                 retlist.append(Blocker(atom=blocker.atom,
7592                                                         root=blocker.root, eapi=blocker.eapi,
7593                                                         satisfied=True))
7594
7595                 unsolvable_blockers = set(self._unsolvable_blockers.leaf_nodes())
7596                 for node in myblocker_uninstalls.root_nodes():
7597                         unsolvable_blockers.add(node)
7598
7599                 for blocker in unsolvable_blockers:
7600                         retlist.append(blocker)
7601
7602                 # If any Uninstall tasks need to be executed in order
7603                 # to avoid a conflict, complete the graph with any
7604                 # dependencies that may have been initially
7605                 # neglected (to ensure that unsafe Uninstall tasks
7606                 # are properly identified and blocked from execution).
7607                 if have_uninstall_task and \
7608                         not complete and \
7609                         not unsolvable_blockers:
7610                         self.myparams.add("complete")
7611                         raise self._serialize_tasks_retry("")
7612
7613                 if unsolvable_blockers and \
7614                         not self._accept_blocker_conflicts():
7615                         self._unsatisfied_blockers_for_display = unsolvable_blockers
7616                         self._serialized_tasks_cache = retlist[:]
7617                         self._scheduler_graph = scheduler_graph
7618                         raise self._unknown_internal_error()
7619
7620                 if self._slot_collision_info and \
7621                         not self._accept_blocker_conflicts():
7622                         self._serialized_tasks_cache = retlist[:]
7623                         self._scheduler_graph = scheduler_graph
7624                         raise self._unknown_internal_error()
7625
7626                 return retlist, scheduler_graph
7627
7628         def _show_circular_deps(self, mygraph):
7629                 # No leaf nodes are available, so we have a circular
7630                 # dependency panic situation.  Reduce the noise level to a
7631                 # minimum via repeated elimination of root nodes since they
7632                 # have no parents and thus can not be part of a cycle.
7633                 while True:
7634                         root_nodes = mygraph.root_nodes(
7635                                 ignore_priority=DepPrioritySatisfiedRange.ignore_medium_soft)
7636                         if not root_nodes:
7637                                 break
7638                         mygraph.difference_update(root_nodes)
7639                 # Display the USE flags that are enabled on nodes that are part
7640                 # of dependency cycles in case that helps the user decide to
7641                 # disable some of them.
7642                 display_order = []
7643                 tempgraph = mygraph.copy()
7644                 while not tempgraph.empty():
7645                         nodes = tempgraph.leaf_nodes()
7646                         if not nodes:
7647                                 node = tempgraph.order[0]
7648                         else:
7649                                 node = nodes[0]
7650                         display_order.append(node)
7651                         tempgraph.remove(node)
7652                 display_order.reverse()
7653                 self.myopts.pop("--quiet", None)
7654                 self.myopts.pop("--verbose", None)
7655                 self.myopts["--tree"] = True
7656                 portage.writemsg("\n\n", noiselevel=-1)
7657                 self.display(display_order)
7658                 prefix = colorize("BAD", " * ")
7659                 portage.writemsg("\n", noiselevel=-1)
7660                 portage.writemsg(prefix + "Error: circular dependencies:\n",
7661                         noiselevel=-1)
7662                 portage.writemsg("\n", noiselevel=-1)
7663                 mygraph.debug_print()
7664                 portage.writemsg("\n", noiselevel=-1)
7665                 portage.writemsg(prefix + "Note that circular dependencies " + \
7666                         "can often be avoided by temporarily\n", noiselevel=-1)
7667                 portage.writemsg(prefix + "disabling USE flags that trigger " + \
7668                         "optional dependencies.\n", noiselevel=-1)
7669
7670         def _show_merge_list(self):
7671                 if self._serialized_tasks_cache is not None and \
7672                         not (self._displayed_list and \
7673                         (self._displayed_list == self._serialized_tasks_cache or \
7674                         self._displayed_list == \
7675                                 list(reversed(self._serialized_tasks_cache)))):
7676                         display_list = self._serialized_tasks_cache[:]
7677                         if "--tree" in self.myopts:
7678                                 display_list.reverse()
7679                         self.display(display_list)
7680
7681         def _show_unsatisfied_blockers(self, blockers):
7682                 self._show_merge_list()
7683                 msg = "Error: The above package list contains " + \
7684                         "packages which cannot be installed " + \
7685                         "at the same time on the same system."
7686                 prefix = colorize("BAD", " * ")
7687                 from textwrap import wrap
7688                 portage.writemsg("\n", noiselevel=-1)
7689                 for line in wrap(msg, 70):
7690                         portage.writemsg(prefix + line + "\n", noiselevel=-1)
7691
7692                 # Display the conflicting packages along with the packages
7693                 # that pulled them in. This is helpful for troubleshooting
7694                 # cases in which blockers don't solve automatically and
7695                 # the reasons are not apparent from the normal merge list
7696                 # display.
7697
7698                 conflict_pkgs = {}
7699                 for blocker in blockers:
7700                         for pkg in chain(self._blocked_pkgs.child_nodes(blocker), \
7701                                 self._blocker_parents.parent_nodes(blocker)):
7702                                 parent_atoms = self._parent_atoms.get(pkg)
7703                                 if not parent_atoms:
7704                                         atom = self._blocked_world_pkgs.get(pkg)
7705                                         if atom is not None:
7706                                                 parent_atoms = set([("@world", atom)])
7707                                 if parent_atoms:
7708                                         conflict_pkgs[pkg] = parent_atoms
7709
7710                 if conflict_pkgs:
7711                         # Reduce noise by pruning packages that are only
7712                         # pulled in by other conflict packages.
7713                         pruned_pkgs = set()
7714                         for pkg, parent_atoms in conflict_pkgs.iteritems():
7715                                 relevant_parent = False
7716                                 for parent, atom in parent_atoms:
7717                                         if parent not in conflict_pkgs:
7718                                                 relevant_parent = True
7719                                                 break
7720                                 if not relevant_parent:
7721                                         pruned_pkgs.add(pkg)
7722                         for pkg in pruned_pkgs:
7723                                 del conflict_pkgs[pkg]
7724
7725                 if conflict_pkgs:
7726                         msg = []
7727                         msg.append("\n")
7728                         indent = "  "
7729                         # Max number of parents shown, to avoid flooding the display.
7730                         max_parents = 3
7731                         for pkg, parent_atoms in conflict_pkgs.iteritems():
7732
7733                                 pruned_list = set()
7734
7735                                 # Prefer packages that are not directly involved in a conflict.
7736                                 for parent_atom in parent_atoms:
7737                                         if len(pruned_list) >= max_parents:
7738                                                 break
7739                                         parent, atom = parent_atom
7740                                         if parent not in conflict_pkgs:
7741                                                 pruned_list.add(parent_atom)
7742
7743                                 for parent_atom in parent_atoms:
7744                                         if len(pruned_list) >= max_parents:
7745                                                 break
7746                                         pruned_list.add(parent_atom)
7747
7748                                 omitted_parents = len(parent_atoms) - len(pruned_list)
7749                                 msg.append(indent + "%s pulled in by\n" % pkg)
7750
7751                                 for parent_atom in pruned_list:
7752                                         parent, atom = parent_atom
7753                                         msg.append(2*indent)
7754                                         if isinstance(parent,
7755                                                 (PackageArg, AtomArg)):
7756                                                 # For PackageArg and AtomArg types, it's
7757                                                 # redundant to display the atom attribute.
7758                                                 msg.append(str(parent))
7759                                         else:
7760                                                 # Display the specific atom from SetArg or
7761                                                 # Package types.
7762                                                 msg.append("%s required by %s" % (atom, parent))
7763                                         msg.append("\n")
7764
7765                                 if omitted_parents:
7766                                         msg.append(2*indent)
7767                                         msg.append("(and %d more)\n" % omitted_parents)
7768
7769                                 msg.append("\n")
7770
7771                         sys.stderr.write("".join(msg))
7772                         sys.stderr.flush()
7773
7774                 if "--quiet" not in self.myopts:
7775                         show_blocker_docs_link()
7776
7777         def display(self, mylist, favorites=[], verbosity=None):
7778
7779                 # This is used to prevent display_problems() from
7780                 # redundantly displaying this exact same merge list
7781                 # again via _show_merge_list().
7782                 self._displayed_list = mylist
7783
7784                 if verbosity is None:
7785                         verbosity = ("--quiet" in self.myopts and 1 or \
7786                                 "--verbose" in self.myopts and 3 or 2)
7787                 favorites_set = InternalPackageSet(favorites)
7788                 oneshot = "--oneshot" in self.myopts or \
7789                         "--onlydeps" in self.myopts
7790                 columns = "--columns" in self.myopts
7791                 changelogs=[]
7792                 p=[]
7793                 blockers = []
7794
7795                 counters = PackageCounters()
7796
7797                 if verbosity == 1 and "--verbose" not in self.myopts:
7798                         def create_use_string(*args):
7799                                 return ""
7800                 else:
7801                         def create_use_string(name, cur_iuse, iuse_forced, cur_use,
7802                                 old_iuse, old_use,
7803                                 is_new, reinst_flags,
7804                                 all_flags=(verbosity == 3 or "--quiet" in self.myopts),
7805                                 alphabetical=("--alphabetical" in self.myopts)):
7806                                 enabled = []
7807                                 if alphabetical:
7808                                         disabled = enabled
7809                                         removed = enabled
7810                                 else:
7811                                         disabled = []
7812                                         removed = []
7813                                 cur_iuse = set(cur_iuse)
7814                                 enabled_flags = cur_iuse.intersection(cur_use)
7815                                 removed_iuse = set(old_iuse).difference(cur_iuse)
7816                                 any_iuse = cur_iuse.union(old_iuse)
7817                                 any_iuse = list(any_iuse)
7818                                 any_iuse.sort()
7819                                 for flag in any_iuse:
7820                                         flag_str = None
7821                                         isEnabled = False
7822                                         reinst_flag = reinst_flags and flag in reinst_flags
7823                                         if flag in enabled_flags:
7824                                                 isEnabled = True
7825                                                 if is_new or flag in old_use and \
7826                                                         (all_flags or reinst_flag):
7827                                                         flag_str = red(flag)
7828                                                 elif flag not in old_iuse:
7829                                                         flag_str = yellow(flag) + "%*"
7830                                                 elif flag not in old_use:
7831                                                         flag_str = green(flag) + "*"
7832                                         elif flag in removed_iuse:
7833                                                 if all_flags or reinst_flag:
7834                                                         flag_str = yellow("-" + flag) + "%"
7835                                                         if flag in old_use:
7836                                                                 flag_str += "*"
7837                                                         flag_str = "(" + flag_str + ")"
7838                                                         removed.append(flag_str)
7839                                                 continue
7840                                         else:
7841                                                 if is_new or flag in old_iuse and \
7842                                                         flag not in old_use and \
7843                                                         (all_flags or reinst_flag):
7844                                                         flag_str = blue("-" + flag)
7845                                                 elif flag not in old_iuse:
7846                                                         flag_str = yellow("-" + flag)
7847                                                         if flag not in iuse_forced:
7848                                                                 flag_str += "%"
7849                                                 elif flag in old_use:
7850                                                         flag_str = green("-" + flag) + "*"
7851                                         if flag_str:
7852                                                 if flag in iuse_forced:
7853                                                         flag_str = "(" + flag_str + ")"
7854                                                 if isEnabled:
7855                                                         enabled.append(flag_str)
7856                                                 else:
7857                                                         disabled.append(flag_str)
7858
7859                                 if alphabetical:
7860                                         ret = " ".join(enabled)
7861                                 else:
7862                                         ret = " ".join(enabled + disabled + removed)
7863                                 if ret:
7864                                         ret = '%s="%s" ' % (name, ret)
7865                                 return ret
7866
7867                 repo_display = RepoDisplay(self.roots)
7868
7869                 tree_nodes = []
7870                 display_list = []
7871                 mygraph = self.digraph.copy()
7872
7873                 # If there are any Uninstall instances, add the corresponding
7874                 # blockers to the digraph (useful for --tree display).
7875
7876                 executed_uninstalls = set(node for node in mylist \
7877                         if isinstance(node, Package) and node.operation == "unmerge")
7878
7879                 for uninstall in self._blocker_uninstalls.leaf_nodes():
7880                         uninstall_parents = \
7881                                 self._blocker_uninstalls.parent_nodes(uninstall)
7882                         if not uninstall_parents:
7883                                 continue
7884
7885                         # Remove the corresponding "nomerge" node and substitute
7886                         # the Uninstall node.
7887                         inst_pkg = self._pkg_cache[
7888                                 ("installed", uninstall.root, uninstall.cpv, "nomerge")]
7889                         try:
7890                                 mygraph.remove(inst_pkg)
7891                         except KeyError:
7892                                 pass
7893
7894                         try:
7895                                 inst_pkg_blockers = self._blocker_parents.child_nodes(inst_pkg)
7896                         except KeyError:
7897                                 inst_pkg_blockers = []
7898
7899                         # Break the Package -> Uninstall edges.
7900                         mygraph.remove(uninstall)
7901
7902                         # Resolution of a package's blockers
7903                         # depend on it's own uninstallation.
7904                         for blocker in inst_pkg_blockers:
7905                                 mygraph.add(uninstall, blocker)
7906
7907                         # Expand Package -> Uninstall edges into
7908                         # Package -> Blocker -> Uninstall edges.
7909                         for blocker in uninstall_parents:
7910                                 mygraph.add(uninstall, blocker)
7911                                 for parent in self._blocker_parents.parent_nodes(blocker):
7912                                         if parent != inst_pkg:
7913                                                 mygraph.add(blocker, parent)
7914
7915                         # If the uninstall task did not need to be executed because
7916                         # of an upgrade, display Blocker -> Upgrade edges since the
7917                         # corresponding Blocker -> Uninstall edges will not be shown.
7918                         upgrade_node = \
7919                                 self._slot_pkg_map[uninstall.root].get(uninstall.slot_atom)
7920                         if upgrade_node is not None and \
7921                                 uninstall not in executed_uninstalls:
7922                                 for blocker in uninstall_parents:
7923                                         mygraph.add(upgrade_node, blocker)
7924
7925                 unsatisfied_blockers = []
7926                 i = 0
7927                 depth = 0
7928                 shown_edges = set()
7929                 for x in mylist:
7930                         if isinstance(x, Blocker) and not x.satisfied:
7931                                 unsatisfied_blockers.append(x)
7932                                 continue
7933                         graph_key = x
7934                         if "--tree" in self.myopts:
7935                                 depth = len(tree_nodes)
7936                                 while depth and graph_key not in \
7937                                         mygraph.child_nodes(tree_nodes[depth-1]):
7938                                                 depth -= 1
7939                                 if depth:
7940                                         tree_nodes = tree_nodes[:depth]
7941                                         tree_nodes.append(graph_key)
7942                                         display_list.append((x, depth, True))
7943                                         shown_edges.add((graph_key, tree_nodes[depth-1]))
7944                                 else:
7945                                         traversed_nodes = set() # prevent endless circles
7946                                         traversed_nodes.add(graph_key)
7947                                         def add_parents(current_node, ordered):
7948                                                 parent_nodes = None
7949                                                 # Do not traverse to parents if this node is an
7950                                                 # an argument or a direct member of a set that has
7951                                                 # been specified as an argument (system or world).
7952                                                 if current_node not in self._set_nodes:
7953                                                         parent_nodes = mygraph.parent_nodes(current_node)
7954                                                 if parent_nodes:
7955                                                         child_nodes = set(mygraph.child_nodes(current_node))
7956                                                         selected_parent = None
7957                                                         # First, try to avoid a direct cycle.
7958                                                         for node in parent_nodes:
7959                                                                 if not isinstance(node, (Blocker, Package)):
7960                                                                         continue
7961                                                                 if node not in traversed_nodes and \
7962                                                                         node not in child_nodes:
7963                                                                         edge = (current_node, node)
7964                                                                         if edge in shown_edges:
7965                                                                                 continue
7966                                                                         selected_parent = node
7967                                                                         break
7968                                                         if not selected_parent:
7969                                                                 # A direct cycle is unavoidable.
7970                                                                 for node in parent_nodes:
7971                                                                         if not isinstance(node, (Blocker, Package)):
7972                                                                                 continue
7973                                                                         if node not in traversed_nodes:
7974                                                                                 edge = (current_node, node)
7975                                                                                 if edge in shown_edges:
7976                                                                                         continue
7977                                                                                 selected_parent = node
7978                                                                                 break
7979                                                         if selected_parent:
7980                                                                 shown_edges.add((current_node, selected_parent))
7981                                                                 traversed_nodes.add(selected_parent)
7982                                                                 add_parents(selected_parent, False)
7983                                                 display_list.append((current_node,
7984                                                         len(tree_nodes), ordered))
7985                                                 tree_nodes.append(current_node)
7986                                         tree_nodes = []
7987                                         add_parents(graph_key, True)
7988                         else:
7989                                 display_list.append((x, depth, True))
7990                 mylist = display_list
7991                 for x in unsatisfied_blockers:
7992                         mylist.append((x, 0, True))
7993
7994                 last_merge_depth = 0
7995                 for i in xrange(len(mylist)-1,-1,-1):
7996                         graph_key, depth, ordered = mylist[i]
7997                         if not ordered and depth == 0 and i > 0 \
7998                                 and graph_key == mylist[i-1][0] and \
7999                                 mylist[i-1][1] == 0:
8000                                 # An ordered node got a consecutive duplicate when the tree was
8001                                 # being filled in.
8002                                 del mylist[i]
8003                                 continue
8004                         if ordered and graph_key[-1] != "nomerge":
8005                                 last_merge_depth = depth
8006                                 continue
8007                         if depth >= last_merge_depth or \
8008                                 i < len(mylist) - 1 and \
8009                                 depth >= mylist[i+1][1]:
8010                                         del mylist[i]
8011
8012                 from portage import flatten
8013                 from portage.dep import use_reduce, paren_reduce
8014                 # files to fetch list - avoids counting a same file twice
8015                 # in size display (verbose mode)
8016                 myfetchlist=[]
8017
8018                 # Use this set to detect when all the "repoadd" strings are "[0]"
8019                 # and disable the entire repo display in this case.
8020                 repoadd_set = set()
8021
8022                 for mylist_index in xrange(len(mylist)):
8023                         x, depth, ordered = mylist[mylist_index]
8024                         pkg_type = x[0]
8025                         myroot = x[1]
8026                         pkg_key = x[2]
8027                         portdb = self.trees[myroot]["porttree"].dbapi
8028                         bindb  = self.trees[myroot]["bintree"].dbapi
8029                         vardb = self.trees[myroot]["vartree"].dbapi
8030                         vartree = self.trees[myroot]["vartree"]
8031                         pkgsettings = self.pkgsettings[myroot]
8032
8033                         fetch=" "
8034                         indent = " " * depth
8035
8036                         if isinstance(x, Blocker):
8037                                 if x.satisfied:
8038                                         blocker_style = "PKG_BLOCKER_SATISFIED"
8039                                         addl = "%s  %s  " % (colorize(blocker_style, "b"), fetch)
8040                                 else:
8041                                         blocker_style = "PKG_BLOCKER"
8042                                         addl = "%s  %s  " % (colorize(blocker_style, "B"), fetch)
8043                                 if ordered:
8044                                         counters.blocks += 1
8045                                         if x.satisfied:
8046                                                 counters.blocks_satisfied += 1
8047                                 resolved = portage.key_expand(
8048                                         str(x.atom).lstrip("!"), mydb=vardb, settings=pkgsettings)
8049                                 if "--columns" in self.myopts and "--quiet" in self.myopts:
8050                                         addl += " " + colorize(blocker_style, resolved)
8051                                 else:
8052                                         addl = "[%s %s] %s%s" % \
8053                                                 (colorize(blocker_style, "blocks"),
8054                                                 addl, indent, colorize(blocker_style, resolved))
8055                                 block_parents = self._blocker_parents.parent_nodes(x)
8056                                 block_parents = set([pnode[2] for pnode in block_parents])
8057                                 block_parents = ", ".join(block_parents)
8058                                 if resolved!=x[2]:
8059                                         addl += colorize(blocker_style,
8060                                                 " (\"%s\" is blocking %s)") % \
8061                                                 (str(x.atom).lstrip("!"), block_parents)
8062                                 else:
8063                                         addl += colorize(blocker_style,
8064                                                 " (is blocking %s)") % block_parents
8065                                 if isinstance(x, Blocker) and x.satisfied:
8066                                         if columns:
8067                                                 continue
8068                                         p.append(addl)
8069                                 else:
8070                                         blockers.append(addl)
8071                         else:
8072                                 pkg_status = x[3]
8073                                 pkg_merge = ordered and pkg_status == "merge"
8074                                 if not pkg_merge and pkg_status == "merge":
8075                                         pkg_status = "nomerge"
8076                                 built = pkg_type != "ebuild"
8077                                 installed = pkg_type == "installed"
8078                                 pkg = x
8079                                 metadata = pkg.metadata
8080                                 ebuild_path = None
8081                                 repo_name = metadata["repository"]
8082                                 if pkg_type == "ebuild":
8083                                         ebuild_path = portdb.findname(pkg_key)
8084                                         if not ebuild_path: # shouldn't happen
8085                                                 raise portage.exception.PackageNotFound(pkg_key)
8086                                         repo_path_real = os.path.dirname(os.path.dirname(
8087                                                 os.path.dirname(ebuild_path)))
8088                                 else:
8089                                         repo_path_real = portdb.getRepositoryPath(repo_name)
8090                                 pkg_use = list(pkg.use.enabled)
8091                                 try:
8092                                         restrict = flatten(use_reduce(paren_reduce(
8093                                                 pkg.metadata["RESTRICT"]), uselist=pkg_use))
8094                                 except portage.exception.InvalidDependString, e:
8095                                         if not pkg.installed:
8096                                                 show_invalid_depstring_notice(x,
8097                                                         pkg.metadata["RESTRICT"], str(e))
8098                                                 del e
8099                                                 return 1
8100                                         restrict = []
8101                                 if "ebuild" == pkg_type and x[3] != "nomerge" and \
8102                                         "fetch" in restrict:
8103                                         fetch = red("F")
8104                                         if ordered:
8105                                                 counters.restrict_fetch += 1
8106                                         if portdb.fetch_check(pkg_key, pkg_use):
8107                                                 fetch = green("f")
8108                                                 if ordered:
8109                                                         counters.restrict_fetch_satisfied += 1
8110
8111                                 #we need to use "--emptrytree" testing here rather than "empty" param testing because "empty"
8112                                 #param is used for -u, where you still *do* want to see when something is being upgraded.
8113                                 myoldbest = []
8114                                 myinslotlist = None
8115                                 installed_versions = vardb.match(portage.cpv_getkey(pkg_key))
8116                                 if vardb.cpv_exists(pkg_key):
8117                                         addl="  "+yellow("R")+fetch+"  "
8118                                         if ordered:
8119                                                 if pkg_merge:
8120                                                         counters.reinst += 1
8121                                                 elif pkg_status == "uninstall":
8122                                                         counters.uninst += 1
8123                                 # filter out old-style virtual matches
8124                                 elif installed_versions and \
8125                                         portage.cpv_getkey(installed_versions[0]) == \
8126                                         portage.cpv_getkey(pkg_key):
8127                                         myinslotlist = vardb.match(pkg.slot_atom)
8128                                         # If this is the first install of a new-style virtual, we
8129                                         # need to filter out old-style virtual matches.
8130                                         if myinslotlist and \
8131                                                 portage.cpv_getkey(myinslotlist[0]) != \
8132                                                 portage.cpv_getkey(pkg_key):
8133                                                 myinslotlist = None
8134                                         if myinslotlist:
8135                                                 myoldbest = myinslotlist[:]
8136                                                 addl = "   " + fetch
8137                                                 if not portage.dep.cpvequal(pkg_key,
8138                                                         portage.best([pkg_key] + myoldbest)):
8139                                                         # Downgrade in slot
8140                                                         addl += turquoise("U")+blue("D")
8141                                                         if ordered:
8142                                                                 counters.downgrades += 1
8143                                                 else:
8144                                                         # Update in slot
8145                                                         addl += turquoise("U") + " "
8146                                                         if ordered:
8147                                                                 counters.upgrades += 1
8148                                         else:
8149                                                 # New slot, mark it new.
8150                                                 addl = " " + green("NS") + fetch + "  "
8151                                                 myoldbest = vardb.match(portage.cpv_getkey(pkg_key))
8152                                                 if ordered:
8153                                                         counters.newslot += 1
8154
8155                                         if "--changelog" in self.myopts:
8156                                                 inst_matches = vardb.match(pkg.slot_atom)
8157                                                 if inst_matches:
8158                                                         changelogs.extend(self.calc_changelog(
8159                                                                 portdb.findname(pkg_key),
8160                                                                 inst_matches[0], pkg_key))
8161                                 else:
8162                                         addl = " " + green("N") + " " + fetch + "  "
8163                                         if ordered:
8164                                                 counters.new += 1
8165
8166                                 verboseadd = ""
8167                                 repoadd = None
8168
8169                                 if True:
8170                                         # USE flag display
8171                                         forced_flags = set()
8172                                         pkgsettings.setcpv(pkg) # for package.use.{mask,force}
8173                                         forced_flags.update(pkgsettings.useforce)
8174                                         forced_flags.update(pkgsettings.usemask)
8175
8176                                         cur_use = [flag for flag in pkg.use.enabled \
8177                                                 if flag in pkg.iuse.all]
8178                                         cur_iuse = sorted(pkg.iuse.all)
8179
8180                                         if myoldbest and myinslotlist:
8181                                                 previous_cpv = myoldbest[0]
8182                                         else:
8183                                                 previous_cpv = pkg.cpv
8184                                         if vardb.cpv_exists(previous_cpv):
8185                                                 old_iuse, old_use = vardb.aux_get(
8186                                                                 previous_cpv, ["IUSE", "USE"])
8187                                                 old_iuse = list(set(
8188                                                         filter_iuse_defaults(old_iuse.split())))
8189                                                 old_iuse.sort()
8190                                                 old_use = old_use.split()
8191                                                 is_new = False
8192                                         else:
8193                                                 old_iuse = []
8194                                                 old_use = []
8195                                                 is_new = True
8196
8197                                         old_use = [flag for flag in old_use if flag in old_iuse]
8198
8199                                         use_expand = pkgsettings["USE_EXPAND"].lower().split()
8200                                         use_expand.sort()
8201                                         use_expand.reverse()
8202                                         use_expand_hidden = \
8203                                                 pkgsettings["USE_EXPAND_HIDDEN"].lower().split()
8204
8205                                         def map_to_use_expand(myvals, forcedFlags=False,
8206                                                 removeHidden=True):
8207                                                 ret = {}
8208                                                 forced = {}
8209                                                 for exp in use_expand:
8210                                                         ret[exp] = []
8211                                                         forced[exp] = set()
8212                                                         for val in myvals[:]:
8213                                                                 if val.startswith(exp.lower()+"_"):
8214                                                                         if val in forced_flags:
8215                                                                                 forced[exp].add(val[len(exp)+1:])
8216                                                                         ret[exp].append(val[len(exp)+1:])
8217                                                                         myvals.remove(val)
8218                                                 ret["USE"] = myvals
8219                                                 forced["USE"] = [val for val in myvals \
8220                                                         if val in forced_flags]
8221                                                 if removeHidden:
8222                                                         for exp in use_expand_hidden:
8223                                                                 ret.pop(exp, None)
8224                                                 if forcedFlags:
8225                                                         return ret, forced
8226                                                 return ret
8227
8228                                         # Prevent USE_EXPAND_HIDDEN flags from being hidden if they
8229                                         # are the only thing that triggered reinstallation.
8230                                         reinst_flags_map = {}
8231                                         reinstall_for_flags = self._reinstall_nodes.get(pkg)
8232                                         reinst_expand_map = None
8233                                         if reinstall_for_flags:
8234                                                 reinst_flags_map = map_to_use_expand(
8235                                                         list(reinstall_for_flags), removeHidden=False)
8236                                                 for k in list(reinst_flags_map):
8237                                                         if not reinst_flags_map[k]:
8238                                                                 del reinst_flags_map[k]
8239                                                 if not reinst_flags_map.get("USE"):
8240                                                         reinst_expand_map = reinst_flags_map.copy()
8241                                                         reinst_expand_map.pop("USE", None)
8242                                         if reinst_expand_map and \
8243                                                 not set(reinst_expand_map).difference(
8244                                                 use_expand_hidden):
8245                                                 use_expand_hidden = \
8246                                                         set(use_expand_hidden).difference(
8247                                                         reinst_expand_map)
8248
8249                                         cur_iuse_map, iuse_forced = \
8250                                                 map_to_use_expand(cur_iuse, forcedFlags=True)
8251                                         cur_use_map = map_to_use_expand(cur_use)
8252                                         old_iuse_map = map_to_use_expand(old_iuse)
8253                                         old_use_map = map_to_use_expand(old_use)
8254
8255                                         use_expand.sort()
8256                                         use_expand.insert(0, "USE")
8257                                         
8258                                         for key in use_expand:
8259                                                 if key in use_expand_hidden:
8260                                                         continue
8261                                                 verboseadd += create_use_string(key.upper(),
8262                                                         cur_iuse_map[key], iuse_forced[key],
8263                                                         cur_use_map[key], old_iuse_map[key],
8264                                                         old_use_map[key], is_new,
8265                                                         reinst_flags_map.get(key))
8266
8267                                 if verbosity == 3:
8268                                         # size verbose
8269                                         mysize=0
8270                                         if pkg_type == "ebuild" and pkg_merge:
8271                                                 try:
8272                                                         myfilesdict = portdb.getfetchsizes(pkg_key,
8273                                                                 useflags=pkg_use, debug=self.edebug)
8274                                                 except portage.exception.InvalidDependString, e:
8275                                                         src_uri = portdb.aux_get(pkg_key, ["SRC_URI"])[0]
8276                                                         show_invalid_depstring_notice(x, src_uri, str(e))
8277                                                         del e
8278                                                         return 1
8279                                                 if myfilesdict is None:
8280                                                         myfilesdict="[empty/missing/bad digest]"
8281                                                 else:
8282                                                         for myfetchfile in myfilesdict:
8283                                                                 if myfetchfile not in myfetchlist:
8284                                                                         mysize+=myfilesdict[myfetchfile]
8285                                                                         myfetchlist.append(myfetchfile)
8286                                                         if ordered:
8287                                                                 counters.totalsize += mysize
8288                                                 verboseadd += format_size(mysize)
8289
8290                                         # overlay verbose
8291                                         # assign index for a previous version in the same slot
8292                                         has_previous = False
8293                                         repo_name_prev = None
8294                                         slot_atom = "%s:%s" % (portage.dep_getkey(pkg_key),
8295                                                 metadata["SLOT"])
8296                                         slot_matches = vardb.match(slot_atom)
8297                                         if slot_matches:
8298                                                 has_previous = True
8299                                                 repo_name_prev = vardb.aux_get(slot_matches[0],
8300                                                         ["repository"])[0]
8301
8302                                         # now use the data to generate output
8303                                         if pkg.installed or not has_previous:
8304                                                 repoadd = repo_display.repoStr(repo_path_real)
8305                                         else:
8306                                                 repo_path_prev = None
8307                                                 if repo_name_prev:
8308                                                         repo_path_prev = portdb.getRepositoryPath(
8309                                                                 repo_name_prev)
8310                                                 if repo_path_prev == repo_path_real:
8311                                                         repoadd = repo_display.repoStr(repo_path_real)
8312                                                 else:
8313                                                         repoadd = "%s=>%s" % (
8314                                                                 repo_display.repoStr(repo_path_prev),
8315                                                                 repo_display.repoStr(repo_path_real))
8316                                         if repoadd:
8317                                                 repoadd_set.add(repoadd)
8318
8319                                 xs = [portage.cpv_getkey(pkg_key)] + \
8320                                         list(portage.catpkgsplit(pkg_key)[2:])
8321                                 if xs[2] == "r0":
8322                                         xs[2] = ""
8323                                 else:
8324                                         xs[2] = "-" + xs[2]
8325
8326                                 mywidth = 130
8327                                 if "COLUMNWIDTH" in self.settings:
8328                                         try:
8329                                                 mywidth = int(self.settings["COLUMNWIDTH"])
8330                                         except ValueError, e:
8331                                                 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
8332                                                 portage.writemsg(
8333                                                         "!!! Unable to parse COLUMNWIDTH='%s'\n" % \
8334                                                         self.settings["COLUMNWIDTH"], noiselevel=-1)
8335                                                 del e
8336                                 oldlp = mywidth - 30
8337                                 newlp = oldlp - 30
8338
8339                                 # Convert myoldbest from a list to a string.
8340                                 if not myoldbest:
8341                                         myoldbest = ""
8342                                 else:
8343                                         for pos, key in enumerate(myoldbest):
8344                                                 key = portage.catpkgsplit(key)[2] + \
8345                                                         "-" + portage.catpkgsplit(key)[3]
8346                                                 if key[-3:] == "-r0":
8347                                                         key = key[:-3]
8348                                                 myoldbest[pos] = key
8349                                         myoldbest = blue("["+", ".join(myoldbest)+"]")
8350
8351                                 pkg_cp = xs[0]
8352                                 root_config = self.roots[myroot]
8353                                 system_set = root_config.sets["system"]
8354                                 world_set  = root_config.sets["world"]
8355
8356                                 pkg_system = False
8357                                 pkg_world = False
8358                                 try:
8359                                         pkg_system = system_set.findAtomForPackage(pkg)
8360                                         pkg_world  = world_set.findAtomForPackage(pkg)
8361                                         if not (oneshot or pkg_world) and \
8362                                                 myroot == self.target_root and \
8363                                                 favorites_set.findAtomForPackage(pkg):
8364                                                 # Maybe it will be added to world now.
8365                                                 if create_world_atom(pkg, favorites_set, root_config):
8366                                                         pkg_world = True
8367                                 except portage.exception.InvalidDependString:
8368                                         # This is reported elsewhere if relevant.
8369                                         pass
8370
8371                                 def pkgprint(pkg_str):
8372                                         if pkg_merge:
8373                                                 if pkg_system:
8374                                                         return colorize("PKG_MERGE_SYSTEM", pkg_str)
8375                                                 elif pkg_world:
8376                                                         return colorize("PKG_MERGE_WORLD", pkg_str)
8377                                                 else:
8378                                                         return colorize("PKG_MERGE", pkg_str)
8379                                         elif pkg_status == "uninstall":
8380                                                 return colorize("PKG_UNINSTALL", pkg_str)
8381                                         else:
8382                                                 if pkg_system:
8383                                                         return colorize("PKG_NOMERGE_SYSTEM", pkg_str)
8384                                                 elif pkg_world:
8385                                                         return colorize("PKG_NOMERGE_WORLD", pkg_str)
8386                                                 else:
8387                                                         return colorize("PKG_NOMERGE", pkg_str)
8388
8389                                 try:
8390                                         properties = flatten(use_reduce(paren_reduce(
8391                                                 pkg.metadata["PROPERTIES"]), uselist=pkg.use.enabled))
8392                                 except portage.exception.InvalidDependString, e:
8393                                         if not pkg.installed:
8394                                                 show_invalid_depstring_notice(pkg,
8395                                                         pkg.metadata["PROPERTIES"], str(e))
8396                                                 del e
8397                                                 return 1
8398                                         properties = []
8399                                 interactive = "interactive" in properties
8400                                 if interactive and pkg.operation == "merge":
8401                                         addl = colorize("WARN", "I") + addl[1:]
8402                                         if ordered:
8403                                                 counters.interactive += 1
8404
8405                                 if x[1]!="/":
8406                                         if myoldbest:
8407                                                 myoldbest +=" "
8408                                         if "--columns" in self.myopts:
8409                                                 if "--quiet" in self.myopts:
8410                                                         myprint=addl+" "+indent+pkgprint(pkg_cp)
8411                                                         myprint=myprint+darkblue(" "+xs[1]+xs[2])+" "
8412                                                         myprint=myprint+myoldbest
8413                                                         myprint=myprint+darkgreen("to "+x[1])
8414                                                         verboseadd = None
8415                                                 else:
8416                                                         if not pkg_merge:
8417                                                                 myprint = "[%s] %s%s" % \
8418                                                                         (pkgprint(pkg_status.ljust(13)),
8419                                                                         indent, pkgprint(pkg.cp))
8420                                                         else:
8421                                                                 myprint = "[%s %s] %s%s" % \
8422                                                                         (pkgprint(pkg.type_name), addl,
8423                                                                         indent, pkgprint(pkg.cp))
8424                                                         if (newlp-nc_len(myprint)) > 0:
8425                                                                 myprint=myprint+(" "*(newlp-nc_len(myprint)))
8426                                                         myprint=myprint+"["+darkblue(xs[1]+xs[2])+"] "
8427                                                         if (oldlp-nc_len(myprint)) > 0:
8428                                                                 myprint=myprint+" "*(oldlp-nc_len(myprint))
8429                                                         myprint=myprint+myoldbest
8430                                                         myprint += darkgreen("to " + pkg.root)
8431                                         else:
8432                                                 if not pkg_merge:
8433                                                         myprint = "[%s] " % pkgprint(pkg_status.ljust(13))
8434                                                 else:
8435                                                         myprint = "[%s %s] " % (pkgprint(pkg_type), addl)
8436                                                 myprint += indent + pkgprint(pkg_key) + " " + \
8437                                                         myoldbest + darkgreen("to " + myroot)
8438                                 else:
8439                                         if "--columns" in self.myopts:
8440                                                 if "--quiet" in self.myopts:
8441                                                         myprint=addl+" "+indent+pkgprint(pkg_cp)
8442                                                         myprint=myprint+" "+green(xs[1]+xs[2])+" "
8443                                                         myprint=myprint+myoldbest
8444                                                         verboseadd = None
8445                                                 else:
8446                                                         if not pkg_merge:
8447                                                                 myprint = "[%s] %s%s" % \
8448                                                                         (pkgprint(pkg_status.ljust(13)),
8449                                                                         indent, pkgprint(pkg.cp))
8450                                                         else:
8451                                                                 myprint = "[%s %s] %s%s" % \
8452                                                                         (pkgprint(pkg.type_name), addl,
8453                                                                         indent, pkgprint(pkg.cp))
8454                                                         if (newlp-nc_len(myprint)) > 0:
8455                                                                 myprint=myprint+(" "*(newlp-nc_len(myprint)))
8456                                                         myprint=myprint+green(" ["+xs[1]+xs[2]+"] ")
8457                                                         if (oldlp-nc_len(myprint)) > 0:
8458                                                                 myprint=myprint+(" "*(oldlp-nc_len(myprint)))
8459                                                         myprint += myoldbest
8460                                         else:
8461                                                 if not pkg_merge:
8462                                                         myprint = "[%s] %s%s %s" % \
8463                                                                 (pkgprint(pkg_status.ljust(13)),
8464                                                                 indent, pkgprint(pkg.cpv),
8465                                                                 myoldbest)
8466                                                 else:
8467                                                         myprint = "[%s %s] %s%s %s" % \
8468                                                                 (pkgprint(pkg_type), addl, indent,
8469                                                                 pkgprint(pkg.cpv), myoldbest)
8470
8471                                 if columns and pkg.operation == "uninstall":
8472                                         continue
8473                                 p.append((myprint, verboseadd, repoadd))
8474
8475                                 if "--tree" not in self.myopts and \
8476                                         "--quiet" not in self.myopts and \
8477                                         not self._opts_no_restart.intersection(self.myopts) and \
8478                                         pkg.root == self._running_root.root and \
8479                                         portage.match_from_list(
8480                                         portage.const.PORTAGE_PACKAGE_ATOM, [pkg]) and \
8481                                         not vardb.cpv_exists(pkg.cpv) and \
8482                                         "--quiet" not in self.myopts:
8483                                                 if mylist_index < len(mylist) - 1:
8484                                                         p.append(colorize("WARN", "*** Portage will stop merging at this point and reload itself,"))
8485                                                         p.append(colorize("WARN", "    then resume the merge."))
8486
8487                 out = sys.stdout
8488                 show_repos = repoadd_set and repoadd_set != set(["0"])
8489
8490                 for x in p:
8491                         if isinstance(x, basestring):
8492                                 out.write("%s\n" % (x,))
8493                                 continue
8494
8495                         myprint, verboseadd, repoadd = x
8496
8497                         if verboseadd:
8498                                 myprint += " " + verboseadd
8499
8500                         if show_repos and repoadd:
8501                                 myprint += " " + teal("[%s]" % repoadd)
8502
8503                         out.write("%s\n" % (myprint,))
8504
8505                 for x in blockers:
8506                         print x
8507
8508                 if verbosity == 3:
8509                         print
8510                         print counters
8511                         if show_repos:
8512                                 sys.stdout.write(str(repo_display))
8513
8514                 if "--changelog" in self.myopts:
8515                         print
8516                         for revision,text in changelogs:
8517                                 print bold('*'+revision)
8518                                 sys.stdout.write(text)
8519
8520                 sys.stdout.flush()
8521                 return os.EX_OK
8522
8523         def display_problems(self):
8524                 """
8525                 Display problems with the dependency graph such as slot collisions.
8526                 This is called internally by display() to show the problems _after_
8527                 the merge list where it is most likely to be seen, but if display()
8528                 is not going to be called then this method should be called explicitly
8529                 to ensure that the user is notified of problems with the graph.
8530
8531                 All output goes to stderr, except for unsatisfied dependencies which
8532                 go to stdout for parsing by programs such as autounmask.
8533                 """
8534
8535                 # Note that show_masked_packages() sends it's output to
8536                 # stdout, and some programs such as autounmask parse the
8537                 # output in cases when emerge bails out. However, when
8538                 # show_masked_packages() is called for installed packages
8539                 # here, the message is a warning that is more appropriate
8540                 # to send to stderr, so temporarily redirect stdout to
8541                 # stderr. TODO: Fix output code so there's a cleaner way
8542                 # to redirect everything to stderr.
8543                 sys.stdout.flush()
8544                 sys.stderr.flush()
8545                 stdout = sys.stdout
8546                 try:
8547                         sys.stdout = sys.stderr
8548                         self._display_problems()
8549                 finally:
8550                         sys.stdout = stdout
8551                         sys.stdout.flush()
8552                         sys.stderr.flush()
8553
8554                 # This goes to stdout for parsing by programs like autounmask.
8555                 for pargs, kwargs in self._unsatisfied_deps_for_display:
8556                         self._show_unsatisfied_dep(*pargs, **kwargs)
8557
8558         def _display_problems(self):
8559                 if self._circular_deps_for_display is not None:
8560                         self._show_circular_deps(
8561                                 self._circular_deps_for_display)
8562
8563                 # The user is only notified of a slot conflict if
8564                 # there are no unresolvable blocker conflicts.
8565                 if self._unsatisfied_blockers_for_display is not None:
8566                         self._show_unsatisfied_blockers(
8567                                 self._unsatisfied_blockers_for_display)
8568                 else:
8569                         self._show_slot_collision_notice()
8570
8571                 # TODO: Add generic support for "set problem" handlers so that
8572                 # the below warnings aren't special cases for world only.
8573
8574                 if self._missing_args:
8575                         world_problems = False
8576                         if "world" in self._sets:
8577                                 # Filter out indirect members of world (from nested sets)
8578                                 # since only direct members of world are desired here.
8579                                 world_set = self.roots[self.target_root].sets["world"]
8580                                 for arg, atom in self._missing_args:
8581                                         if arg.name == "world" and atom in world_set:
8582                                                 world_problems = True
8583                                                 break
8584
8585                         if world_problems:
8586                                 sys.stderr.write("\n!!! Problems have been " + \
8587                                         "detected with your world file\n")
8588                                 sys.stderr.write("!!! Please run " + \
8589                                         green("emaint --check world")+"\n\n")
8590
8591                 if self._missing_args:
8592                         sys.stderr.write("\n" + colorize("BAD", "!!!") + \
8593                                 " Ebuilds for the following packages are either all\n")
8594                         sys.stderr.write(colorize("BAD", "!!!") + \
8595                                 " masked or don't exist:\n")
8596                         sys.stderr.write(" ".join(str(atom) for arg, atom in \
8597                                 self._missing_args) + "\n")
8598
8599                 if self._pprovided_args:
8600                         arg_refs = {}
8601                         for arg, atom in self._pprovided_args:
8602                                 if isinstance(arg, SetArg):
8603                                         parent = arg.name
8604                                         arg_atom = (atom, atom)
8605                                 else:
8606                                         parent = "args"
8607                                         arg_atom = (arg.arg, atom)
8608                                 refs = arg_refs.setdefault(arg_atom, [])
8609                                 if parent not in refs:
8610                                         refs.append(parent)
8611                         msg = []
8612                         msg.append(bad("\nWARNING: "))
8613                         if len(self._pprovided_args) > 1:
8614                                 msg.append("Requested packages will not be " + \
8615                                         "merged because they are listed in\n")
8616                         else:
8617                                 msg.append("A requested package will not be " + \
8618                                         "merged because it is listed in\n")
8619                         msg.append("package.provided:\n\n")
8620                         problems_sets = set()
8621                         for (arg, atom), refs in arg_refs.iteritems():
8622                                 ref_string = ""
8623                                 if refs:
8624                                         problems_sets.update(refs)
8625                                         refs.sort()
8626                                         ref_string = ", ".join(["'%s'" % name for name in refs])
8627                                         ref_string = " pulled in by " + ref_string
8628                                 msg.append("  %s%s\n" % (colorize("INFORM", str(arg)), ref_string))
8629                         msg.append("\n")
8630                         if "world" in problems_sets:
8631                                 msg.append("This problem can be solved in one of the following ways:\n\n")
8632                                 msg.append("  A) Use emaint to clean offending packages from world (if not installed).\n")
8633                                 msg.append("  B) Uninstall offending packages (cleans them from world).\n")
8634                                 msg.append("  C) Remove offending entries from package.provided.\n\n")
8635                                 msg.append("The best course of action depends on the reason that an offending\n")
8636                                 msg.append("package.provided entry exists.\n\n")
8637                         sys.stderr.write("".join(msg))
8638
8639                 masked_packages = []
8640                 for pkg in self._masked_installed:
8641                         root_config = pkg.root_config
8642                         pkgsettings = self.pkgsettings[pkg.root]
8643                         mreasons = get_masking_status(pkg, pkgsettings, root_config)
8644                         masked_packages.append((root_config, pkgsettings,
8645                                 pkg.cpv, pkg.metadata, mreasons))
8646                 if masked_packages:
8647                         sys.stderr.write("\n" + colorize("BAD", "!!!") + \
8648                                 " The following installed packages are masked:\n")
8649                         show_masked_packages(masked_packages)
8650                         show_mask_docs()
8651                         print
8652
8653         def calc_changelog(self,ebuildpath,current,next):
8654                 if ebuildpath == None or not os.path.exists(ebuildpath):
8655                         return []
8656                 current = '-'.join(portage.catpkgsplit(current)[1:])
8657                 if current.endswith('-r0'):
8658                         current = current[:-3]
8659                 next = '-'.join(portage.catpkgsplit(next)[1:])
8660                 if next.endswith('-r0'):
8661                         next = next[:-3]
8662                 changelogpath = os.path.join(os.path.split(ebuildpath)[0],'ChangeLog')
8663                 try:
8664                         changelog = open(changelogpath).read()
8665                 except SystemExit, e:
8666                         raise # Needed else can't exit
8667                 except:
8668                         return []
8669                 divisions = self.find_changelog_tags(changelog)
8670                 #print 'XX from',current,'to',next
8671                 #for div,text in divisions: print 'XX',div
8672                 # skip entries for all revisions above the one we are about to emerge
8673                 for i in range(len(divisions)):
8674                         if divisions[i][0]==next:
8675                                 divisions = divisions[i:]
8676                                 break
8677                 # find out how many entries we are going to display
8678                 for i in range(len(divisions)):
8679                         if divisions[i][0]==current:
8680                                 divisions = divisions[:i]
8681                                 break
8682                 else:
8683                     # couldnt find the current revision in the list. display nothing
8684                         return []
8685                 return divisions
8686
8687         def find_changelog_tags(self,changelog):
8688                 divs = []
8689                 release = None
8690                 while 1:
8691                         match = re.search(r'^\*\ ?([-a-zA-Z0-9_.+]*)(?:\ .*)?\n',changelog,re.M)
8692                         if match is None:
8693                                 if release is not None:
8694                                         divs.append((release,changelog))
8695                                 return divs
8696                         if release is not None:
8697                                 divs.append((release,changelog[:match.start()]))
8698                         changelog = changelog[match.end():]
8699                         release = match.group(1)
8700                         if release.endswith('.ebuild'):
8701                                 release = release[:-7]
8702                         if release.endswith('-r0'):
8703                                 release = release[:-3]
8704
8705         def saveNomergeFavorites(self):
8706                 """Find atoms in favorites that are not in the mergelist and add them
8707                 to the world file if necessary."""
8708                 for x in ("--buildpkgonly", "--fetchonly", "--fetch-all-uri",
8709                         "--oneshot", "--onlydeps", "--pretend"):
8710                         if x in self.myopts:
8711                                 return
8712                 root_config = self.roots[self.target_root]
8713                 world_set = root_config.sets["world"]
8714
8715                 world_locked = False
8716                 if hasattr(world_set, "lock"):
8717                         world_set.lock()
8718                         world_locked = True
8719
8720                 if hasattr(world_set, "load"):
8721                         world_set.load() # maybe it's changed on disk
8722
8723                 args_set = self._sets["args"]
8724                 portdb = self.trees[self.target_root]["porttree"].dbapi
8725                 added_favorites = set()
8726                 for x in self._set_nodes:
8727                         pkg_type, root, pkg_key, pkg_status = x
8728                         if pkg_status != "nomerge":
8729                                 continue
8730
8731                         try:
8732                                 myfavkey = create_world_atom(x, args_set, root_config)
8733                                 if myfavkey:
8734                                         if myfavkey in added_favorites:
8735                                                 continue
8736                                         added_favorites.add(myfavkey)
8737                         except portage.exception.InvalidDependString, e:
8738                                 writemsg("\n\n!!! '%s' has invalid PROVIDE: %s\n" % \
8739                                         (pkg_key, str(e)), noiselevel=-1)
8740                                 writemsg("!!! see '%s'\n\n" % os.path.join(
8741                                         root, portage.VDB_PATH, pkg_key, "PROVIDE"), noiselevel=-1)
8742                                 del e
8743                 all_added = []
8744                 for k in self._sets:
8745                         if k in ("args", "world") or not root_config.sets[k].world_candidate:
8746                                 continue
8747                         s = SETPREFIX + k
8748                         if s in world_set:
8749                                 continue
8750                         all_added.append(SETPREFIX + k)
8751                 all_added.extend(added_favorites)
8752                 all_added.sort()
8753                 for a in all_added:
8754                         print ">>> Recording %s in \"world\" favorites file..." % \
8755                                 colorize("INFORM", str(a))
8756                 if all_added:
8757                         world_set.update(all_added)
8758
8759                 if world_locked:
8760                         world_set.unlock()
8761
8762         def loadResumeCommand(self, resume_data, skip_masked=False):
8763                 """
8764                 Add a resume command to the graph and validate it in the process.  This
8765                 will raise a PackageNotFound exception if a package is not available.
8766                 """
8767
8768                 if not isinstance(resume_data, dict):
8769                         return False
8770
8771                 mergelist = resume_data.get("mergelist")
8772                 if not isinstance(mergelist, list):
8773                         mergelist = []
8774
8775                 fakedb = self.mydbapi
8776                 trees = self.trees
8777                 serialized_tasks = []
8778                 masked_tasks = []
8779                 for x in mergelist:
8780                         if not (isinstance(x, list) and len(x) == 4):
8781                                 continue
8782                         pkg_type, myroot, pkg_key, action = x
8783                         if pkg_type not in self.pkg_tree_map:
8784                                 continue
8785                         if action != "merge":
8786                                 continue
8787                         tree_type = self.pkg_tree_map[pkg_type]
8788                         mydb = trees[myroot][tree_type].dbapi
8789                         db_keys = list(self._trees_orig[myroot][
8790                                 tree_type].dbapi._aux_cache_keys)
8791                         try:
8792                                 metadata = izip(db_keys, mydb.aux_get(pkg_key, db_keys))
8793                         except KeyError:
8794                                 # It does no exist or it is corrupt.
8795                                 if action == "uninstall":
8796                                         continue
8797                                 raise portage.exception.PackageNotFound(pkg_key)
8798                         installed = action == "uninstall"
8799                         built = pkg_type != "ebuild"
8800                         root_config = self.roots[myroot]
8801                         pkg = Package(built=built, cpv=pkg_key,
8802                                 installed=installed, metadata=metadata,
8803                                 operation=action, root_config=root_config,
8804                                 type_name=pkg_type)
8805                         if pkg_type == "ebuild":
8806                                 pkgsettings = self.pkgsettings[myroot]
8807                                 pkgsettings.setcpv(pkg)
8808                                 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
8809                                 pkg.metadata['CHOST'] = pkgsettings.get('CHOST', '')
8810                         self._pkg_cache[pkg] = pkg
8811
8812                         root_config = self.roots[pkg.root]
8813                         if "merge" == pkg.operation and \
8814                                 not visible(root_config.settings, pkg):
8815                                 if skip_masked:
8816                                         masked_tasks.append(Dependency(root=pkg.root, parent=pkg))
8817                                 else:
8818                                         self._unsatisfied_deps_for_display.append(
8819                                                 ((pkg.root, "="+pkg.cpv), {"myparent":None}))
8820
8821                         fakedb[myroot].cpv_inject(pkg)
8822                         serialized_tasks.append(pkg)
8823                         self.spinner.update()
8824
8825                 if self._unsatisfied_deps_for_display:
8826                         return False
8827
8828                 if not serialized_tasks or "--nodeps" in self.myopts:
8829                         self._serialized_tasks_cache = serialized_tasks
8830                         self._scheduler_graph = self.digraph
8831                 else:
8832                         self._select_package = self._select_pkg_from_graph
8833                         self.myparams.add("selective")
8834                         # Always traverse deep dependencies in order to account for
8835                         # potentially unsatisfied dependencies of installed packages.
8836                         # This is necessary for correct --keep-going or --resume operation
8837                         # in case a package from a group of circularly dependent packages
8838                         # fails. In this case, a package which has recently been installed
8839                         # may have an unsatisfied circular dependency (pulled in by
8840                         # PDEPEND, for example). So, even though a package is already
8841                         # installed, it may not have all of it's dependencies satisfied, so
8842                         # it may not be usable. If such a package is in the subgraph of
8843                         # deep depenedencies of a scheduled build, that build needs to
8844                         # be cancelled. In order for this type of situation to be
8845                         # recognized, deep traversal of dependencies is required.
8846                         self.myparams.add("deep")
8847
8848                         favorites = resume_data.get("favorites")
8849                         args_set = self._sets["args"]
8850                         if isinstance(favorites, list):
8851                                 args = self._load_favorites(favorites)
8852                         else:
8853                                 args = []
8854
8855                         for task in serialized_tasks:
8856                                 if isinstance(task, Package) and \
8857                                         task.operation == "merge":
8858                                         if not self._add_pkg(task, None):
8859                                                 return False
8860
8861                         # Packages for argument atoms need to be explicitly
8862                         # added via _add_pkg() so that they are included in the
8863                         # digraph (needed at least for --tree display).
8864                         for arg in args:
8865                                 for atom in arg.set:
8866                                         pkg, existing_node = self._select_package(
8867                                                 arg.root_config.root, atom)
8868                                         if existing_node is None and \
8869                                                 pkg is not None:
8870                                                 if not self._add_pkg(pkg, Dependency(atom=atom,
8871                                                         root=pkg.root, parent=arg)):
8872                                                         return False
8873
8874                         # Allow unsatisfied deps here to avoid showing a masking
8875                         # message for an unsatisfied dep that isn't necessarily
8876                         # masked.
8877                         if not self._create_graph(allow_unsatisfied=True):
8878                                 return False
8879
8880                         unsatisfied_deps = []
8881                         for dep in self._unsatisfied_deps:
8882                                 if not isinstance(dep.parent, Package):
8883                                         continue
8884                                 if dep.parent.operation == "merge":
8885                                         unsatisfied_deps.append(dep)
8886                                         continue
8887
8888                                 # For unsatisfied deps of installed packages, only account for
8889                                 # them if they are in the subgraph of dependencies of a package
8890                                 # which is scheduled to be installed.
8891                                 unsatisfied_install = False
8892                                 traversed = set()
8893                                 dep_stack = self.digraph.parent_nodes(dep.parent)
8894                                 while dep_stack:
8895                                         node = dep_stack.pop()
8896                                         if not isinstance(node, Package):
8897                                                 continue
8898                                         if node.operation == "merge":
8899                                                 unsatisfied_install = True
8900                                                 break
8901                                         if node in traversed:
8902                                                 continue
8903                                         traversed.add(node)
8904                                         dep_stack.extend(self.digraph.parent_nodes(node))
8905
8906                                 if unsatisfied_install:
8907                                         unsatisfied_deps.append(dep)
8908
8909                         if masked_tasks or unsatisfied_deps:
8910                                 # This probably means that a required package
8911                                 # was dropped via --skipfirst. It makes the
8912                                 # resume list invalid, so convert it to a
8913                                 # UnsatisfiedResumeDep exception.
8914                                 raise self.UnsatisfiedResumeDep(self,
8915                                         masked_tasks + unsatisfied_deps)
8916                         self._serialized_tasks_cache = None
8917                         try:
8918                                 self.altlist()
8919                         except self._unknown_internal_error:
8920                                 return False
8921
8922                 return True
8923
8924         def _load_favorites(self, favorites):
8925                 """
8926                 Use a list of favorites to resume state from a
8927                 previous select_files() call. This creates similar
8928                 DependencyArg instances to those that would have
8929                 been created by the original select_files() call.
8930                 This allows Package instances to be matched with
8931                 DependencyArg instances during graph creation.
8932                 """
8933                 root_config = self.roots[self.target_root]
8934                 getSetAtoms = root_config.setconfig.getSetAtoms
8935                 sets = root_config.sets
8936                 args = []
8937                 for x in favorites:
8938                         if not isinstance(x, basestring):
8939                                 continue
8940                         if x in ("system", "world"):
8941                                 x = SETPREFIX + x
8942                         if x.startswith(SETPREFIX):
8943                                 s = x[len(SETPREFIX):]
8944                                 if s not in sets:
8945                                         continue
8946                                 if s in self._sets:
8947                                         continue
8948                                 # Recursively expand sets so that containment tests in
8949                                 # self._get_parent_sets() properly match atoms in nested
8950                                 # sets (like if world contains system).
8951                                 expanded_set = InternalPackageSet(
8952                                         initial_atoms=getSetAtoms(s))
8953                                 self._sets[s] = expanded_set
8954                                 args.append(SetArg(arg=x, set=expanded_set,
8955                                         root_config=root_config))
8956                         else:
8957                                 if not portage.isvalidatom(x):
8958                                         continue
8959                                 args.append(AtomArg(arg=x, atom=x,
8960                                         root_config=root_config))
8961
8962                 self._set_args(args)
8963                 return args
8964
8965         class UnsatisfiedResumeDep(portage.exception.PortageException):
8966                 """
8967                 A dependency of a resume list is not installed. This
8968                 can occur when a required package is dropped from the
8969                 merge list via --skipfirst.
8970                 """
8971                 def __init__(self, depgraph, value):
8972                         portage.exception.PortageException.__init__(self, value)
8973                         self.depgraph = depgraph
8974
8975         class _internal_exception(portage.exception.PortageException):
8976                 def __init__(self, value=""):
8977                         portage.exception.PortageException.__init__(self, value)
8978
8979         class _unknown_internal_error(_internal_exception):
8980                 """
8981                 Used by the depgraph internally to terminate graph creation.
8982                 The specific reason for the failure should have been dumped
8983                 to stderr, unfortunately, the exact reason for the failure
8984                 may not be known.
8985                 """
8986
8987         class _serialize_tasks_retry(_internal_exception):
8988                 """
8989                 This is raised by the _serialize_tasks() method when it needs to
8990                 be called again for some reason. The only case that it's currently
8991                 used for is when neglected dependencies need to be added to the
8992                 graph in order to avoid making a potentially unsafe decision.
8993                 """
8994
8995         class _dep_check_composite_db(portage.dbapi):
8996                 """
8997                 A dbapi-like interface that is optimized for use in dep_check() calls.
8998                 This is built on top of the existing depgraph package selection logic.
8999                 Some packages that have been added to the graph may be masked from this
9000                 view in order to influence the atom preference selection that occurs
9001                 via dep_check().
9002                 """
9003                 def __init__(self, depgraph, root):
9004                         portage.dbapi.__init__(self)
9005                         self._depgraph = depgraph
9006                         self._root = root
9007                         self._match_cache = {}
9008                         self._cpv_pkg_map = {}
9009
9010                 def _clear_cache(self):
9011                         self._match_cache.clear()
9012                         self._cpv_pkg_map.clear()
9013
9014                 def match(self, atom):
9015                         ret = self._match_cache.get(atom)
9016                         if ret is not None:
9017                                 return ret[:]
9018                         orig_atom = atom
9019                         if "/" not in atom:
9020                                 atom = self._dep_expand(atom)
9021                         pkg, existing = self._depgraph._select_package(self._root, atom)
9022                         if not pkg:
9023                                 ret = []
9024                         else:
9025                                 # Return the highest available from select_package() as well as
9026                                 # any matching slots in the graph db.
9027                                 slots = set()
9028                                 slots.add(pkg.metadata["SLOT"])
9029                                 atom_cp = portage.dep_getkey(atom)
9030                                 if pkg.cp.startswith("virtual/"):
9031                                         # For new-style virtual lookahead that occurs inside
9032                                         # dep_check(), examine all slots. This is needed
9033                                         # so that newer slots will not unnecessarily be pulled in
9034                                         # when a satisfying lower slot is already installed. For
9035                                         # example, if virtual/jdk-1.4 is satisfied via kaffe then
9036                                         # there's no need to pull in a newer slot to satisfy a
9037                                         # virtual/jdk dependency.
9038                                         for db, pkg_type, built, installed, db_keys in \
9039                                                 self._depgraph._filtered_trees[self._root]["dbs"]:
9040                                                 for cpv in db.match(atom):
9041                                                         if portage.cpv_getkey(cpv) != pkg.cp:
9042                                                                 continue
9043                                                         slots.add(db.aux_get(cpv, ["SLOT"])[0])
9044                                 ret = []
9045                                 if self._visible(pkg):
9046                                         self._cpv_pkg_map[pkg.cpv] = pkg
9047                                         ret.append(pkg.cpv)
9048                                 slots.remove(pkg.metadata["SLOT"])
9049                                 while slots:
9050                                         slot_atom = "%s:%s" % (atom_cp, slots.pop())
9051                                         pkg, existing = self._depgraph._select_package(
9052                                                 self._root, slot_atom)
9053                                         if not pkg:
9054                                                 continue
9055                                         if not self._visible(pkg):
9056                                                 continue
9057                                         self._cpv_pkg_map[pkg.cpv] = pkg
9058                                         ret.append(pkg.cpv)
9059                                 if ret:
9060                                         self._cpv_sort_ascending(ret)
9061                         self._match_cache[orig_atom] = ret
9062                         return ret[:]
9063
9064                 def _visible(self, pkg):
9065                         if pkg.installed and "selective" not in self._depgraph.myparams:
9066                                 try:
9067                                         arg = self._depgraph._iter_atoms_for_pkg(pkg).next()
9068                                 except (StopIteration, portage.exception.InvalidDependString):
9069                                         arg = None
9070                                 if arg:
9071                                         return False
9072                         if pkg.installed:
9073                                 try:
9074                                         if not visible(
9075                                                 self._depgraph.pkgsettings[pkg.root], pkg):
9076                                                 return False
9077                                 except portage.exception.InvalidDependString:
9078                                         pass
9079                         in_graph = self._depgraph._slot_pkg_map[
9080                                 self._root].get(pkg.slot_atom)
9081                         if in_graph is None:
9082                                 # Mask choices for packages which are not the highest visible
9083                                 # version within their slot (since they usually trigger slot
9084                                 # conflicts).
9085                                 highest_visible, in_graph = self._depgraph._select_package(
9086                                         self._root, pkg.slot_atom)
9087                                 if pkg != highest_visible:
9088                                         return False
9089                         elif in_graph != pkg:
9090                                 # Mask choices for packages that would trigger a slot
9091                                 # conflict with a previously selected package.
9092                                 return False
9093                         return True
9094
9095                 def _dep_expand(self, atom):
9096                         """
9097                         This is only needed for old installed packages that may
9098                         contain atoms that are not fully qualified with a specific
9099                         category. Emulate the cpv_expand() function that's used by
9100                         dbapi.match() in cases like this. If there are multiple
9101                         matches, it's often due to a new-style virtual that has
9102                         been added, so try to filter those out to avoid raising
9103                         a ValueError.
9104                         """
9105                         root_config = self._depgraph.roots[self._root]
9106                         orig_atom = atom
9107                         expanded_atoms = self._depgraph._dep_expand(root_config, atom)
9108                         if len(expanded_atoms) > 1:
9109                                 non_virtual_atoms = []
9110                                 for x in expanded_atoms:
9111                                         if not portage.dep_getkey(x).startswith("virtual/"):
9112                                                 non_virtual_atoms.append(x)
9113                                 if len(non_virtual_atoms) == 1:
9114                                         expanded_atoms = non_virtual_atoms
9115                         if len(expanded_atoms) > 1:
9116                                 # compatible with portage.cpv_expand()
9117                                 raise portage.exception.AmbiguousPackageName(
9118                                         [portage.dep_getkey(x) for x in expanded_atoms])
9119                         if expanded_atoms:
9120                                 atom = expanded_atoms[0]
9121                         else:
9122                                 null_atom = insert_category_into_atom(atom, "null")
9123                                 null_cp = portage.dep_getkey(null_atom)
9124                                 cat, atom_pn = portage.catsplit(null_cp)
9125                                 virts_p = root_config.settings.get_virts_p().get(atom_pn)
9126                                 if virts_p:
9127                                         # Allow the resolver to choose which virtual.
9128                                         atom = insert_category_into_atom(atom, "virtual")
9129                                 else:
9130                                         atom = insert_category_into_atom(atom, "null")
9131                         return atom
9132
9133                 def aux_get(self, cpv, wants):
9134                         metadata = self._cpv_pkg_map[cpv].metadata
9135                         return [metadata.get(x, "") for x in wants]
9136
9137 class RepoDisplay(object):
9138         def __init__(self, roots):
9139                 self._shown_repos = {}
9140                 self._unknown_repo = False
9141                 repo_paths = set()
9142                 for root_config in roots.itervalues():
9143                         portdir = root_config.settings.get("PORTDIR")
9144                         if portdir:
9145                                 repo_paths.add(portdir)
9146                         overlays = root_config.settings.get("PORTDIR_OVERLAY")
9147                         if overlays:
9148                                 repo_paths.update(overlays.split())
9149                 repo_paths = list(repo_paths)
9150                 self._repo_paths = repo_paths
9151                 self._repo_paths_real = [ os.path.realpath(repo_path) \
9152                         for repo_path in repo_paths ]
9153
9154                 # pre-allocate index for PORTDIR so that it always has index 0.
9155                 for root_config in roots.itervalues():
9156                         portdb = root_config.trees["porttree"].dbapi
9157                         portdir = portdb.porttree_root
9158                         if portdir:
9159                                 self.repoStr(portdir)
9160
9161         def repoStr(self, repo_path_real):
9162                 real_index = -1
9163                 if repo_path_real:
9164                         real_index = self._repo_paths_real.index(repo_path_real)
9165                 if real_index == -1:
9166                         s = "?"
9167                         self._unknown_repo = True
9168                 else:
9169                         shown_repos = self._shown_repos
9170                         repo_paths = self._repo_paths
9171                         repo_path = repo_paths[real_index]
9172                         index = shown_repos.get(repo_path)
9173                         if index is None:
9174                                 index = len(shown_repos)
9175                                 shown_repos[repo_path] = index
9176                         s = str(index)
9177                 return s
9178
9179         def __str__(self):
9180                 output = []
9181                 shown_repos = self._shown_repos
9182                 unknown_repo = self._unknown_repo
9183                 if shown_repos or self._unknown_repo:
9184                         output.append("Portage tree and overlays:\n")
9185                 show_repo_paths = list(shown_repos)
9186                 for repo_path, repo_index in shown_repos.iteritems():
9187                         show_repo_paths[repo_index] = repo_path
9188                 if show_repo_paths:
9189                         for index, repo_path in enumerate(show_repo_paths):
9190                                 output.append(" "+teal("["+str(index)+"]")+" %s\n" % repo_path)
9191                 if unknown_repo:
9192                         output.append(" "+teal("[?]") + \
9193                                 " indicates that the source repository could not be determined\n")
9194                 return "".join(output)
9195
9196 class PackageCounters(object):
9197
9198         def __init__(self):
9199                 self.upgrades   = 0
9200                 self.downgrades = 0
9201                 self.new        = 0
9202                 self.newslot    = 0
9203                 self.reinst     = 0
9204                 self.uninst     = 0
9205                 self.blocks     = 0
9206                 self.blocks_satisfied         = 0
9207                 self.totalsize  = 0
9208                 self.restrict_fetch           = 0
9209                 self.restrict_fetch_satisfied = 0
9210                 self.interactive              = 0
9211
9212         def __str__(self):
9213                 total_installs = self.upgrades + self.downgrades + self.newslot + self.new + self.reinst
9214                 myoutput = []
9215                 details = []
9216                 myoutput.append("Total: %s package" % total_installs)
9217                 if total_installs != 1:
9218                         myoutput.append("s")
9219                 if total_installs != 0:
9220                         myoutput.append(" (")
9221                 if self.upgrades > 0:
9222                         details.append("%s upgrade" % self.upgrades)
9223                         if self.upgrades > 1:
9224                                 details[-1] += "s"
9225                 if self.downgrades > 0:
9226                         details.append("%s downgrade" % self.downgrades)
9227                         if self.downgrades > 1:
9228                                 details[-1] += "s"
9229                 if self.new > 0:
9230                         details.append("%s new" % self.new)
9231                 if self.newslot > 0:
9232                         details.append("%s in new slot" % self.newslot)
9233                         if self.newslot > 1:
9234                                 details[-1] += "s"
9235                 if self.reinst > 0:
9236                         details.append("%s reinstall" % self.reinst)
9237                         if self.reinst > 1:
9238                                 details[-1] += "s"
9239                 if self.uninst > 0:
9240                         details.append("%s uninstall" % self.uninst)
9241                         if self.uninst > 1:
9242                                 details[-1] += "s"
9243                 if self.interactive > 0:
9244                         details.append("%s %s" % (self.interactive,
9245                                 colorize("WARN", "interactive")))
9246                 myoutput.append(", ".join(details))
9247                 if total_installs != 0:
9248                         myoutput.append(")")
9249                 myoutput.append(", Size of downloads: %s" % format_size(self.totalsize))
9250                 if self.restrict_fetch:
9251                         myoutput.append("\nFetch Restriction: %s package" % \
9252                                 self.restrict_fetch)
9253                         if self.restrict_fetch > 1:
9254                                 myoutput.append("s")
9255                 if self.restrict_fetch_satisfied < self.restrict_fetch:
9256                         myoutput.append(bad(" (%s unsatisfied)") % \
9257                                 (self.restrict_fetch - self.restrict_fetch_satisfied))
9258                 if self.blocks > 0:
9259                         myoutput.append("\nConflict: %s block" % \
9260                                 self.blocks)
9261                         if self.blocks > 1:
9262                                 myoutput.append("s")
9263                         if self.blocks_satisfied < self.blocks:
9264                                 myoutput.append(bad(" (%s unsatisfied)") % \
9265                                         (self.blocks - self.blocks_satisfied))
9266                 return "".join(myoutput)
9267
9268 class UseFlagDisplay(object):
9269
9270         __slots__ = ('name', 'enabled', 'forced')
9271
9272         def __init__(self, name, enabled, forced):
9273                 self.name = name
9274                 self.enabled = enabled
9275                 self.forced = forced
9276
9277         def __str__(self):
9278                 s = self.name
9279                 if self.enabled:
9280                         s = red(s)
9281                 else:
9282                         s = '-' + s
9283                         s = blue(s)
9284                 if self.forced:
9285                         s = '(%s)' % s
9286                 return s
9287
9288         def _cmp_combined(a, b):
9289                 """
9290                 Sort by name, combining enabled and disabled flags.
9291                 """
9292                 return (a.name > b.name) - (a.name < b.name)
9293
9294         sort_combined = cmp_sort_key(_cmp_combined)
9295         del _cmp_combined
9296
9297         def _cmp_separated(a, b):
9298                 """
9299                 Sort by name, separating enabled flags from disabled flags.
9300                 """
9301                 enabled_diff = b.enabled - a.enabled
9302                 if enabled_diff:
9303                         return enabled_diff
9304                 return (a.name > b.name) - (a.name < b.name)
9305
9306         sort_separated = cmp_sort_key(_cmp_separated)
9307         del _cmp_separated
9308
9309 class PollSelectAdapter(PollConstants):
9310
9311         """
9312         Use select to emulate a poll object, for
9313         systems that don't support poll().
9314         """
9315
9316         def __init__(self):
9317                 self._registered = {}
9318                 self._select_args = [[], [], []]
9319
9320         def register(self, fd, *args):
9321                 """
9322                 Only POLLIN is currently supported!
9323                 """
9324                 if len(args) > 1:
9325                         raise TypeError(
9326                                 "register expected at most 2 arguments, got " + \
9327                                 repr(1 + len(args)))
9328
9329                 eventmask = PollConstants.POLLIN | \
9330                         PollConstants.POLLPRI | PollConstants.POLLOUT
9331                 if args:
9332                         eventmask = args[0]
9333
9334                 self._registered[fd] = eventmask
9335                 self._select_args = None
9336
9337         def unregister(self, fd):
9338                 self._select_args = None
9339                 del self._registered[fd]
9340
9341         def poll(self, *args):
9342                 if len(args) > 1:
9343                         raise TypeError(
9344                                 "poll expected at most 2 arguments, got " + \
9345                                 repr(1 + len(args)))
9346
9347                 timeout = None
9348                 if args:
9349                         timeout = args[0]
9350
9351                 select_args = self._select_args
9352                 if select_args is None:
9353                         select_args = [self._registered.keys(), [], []]
9354
9355                 if timeout is not None:
9356                         select_args = select_args[:]
9357                         # Translate poll() timeout args to select() timeout args:
9358                         #
9359                         #          | units        | value(s) for indefinite block
9360                         # ---------|--------------|------------------------------
9361                         #   poll   | milliseconds | omitted, negative, or None
9362                         # ---------|--------------|------------------------------
9363                         #   select | seconds      | omitted
9364                         # ---------|--------------|------------------------------
9365
9366                         if timeout is not None and timeout < 0:
9367                                 timeout = None
9368                         if timeout is not None:
9369                                 select_args.append(timeout / 1000)
9370
9371                 select_events = select.select(*select_args)
9372                 poll_events = []
9373                 for fd in select_events[0]:
9374                         poll_events.append((fd, PollConstants.POLLIN))
9375                 return poll_events
9376
9377 class SequentialTaskQueue(SlotObject):
9378
9379         __slots__ = ("max_jobs", "running_tasks") + \
9380                 ("_dirty", "_scheduling", "_task_queue")
9381
9382         def __init__(self, **kwargs):
9383                 SlotObject.__init__(self, **kwargs)
9384                 self._task_queue = deque()
9385                 self.running_tasks = set()
9386                 if self.max_jobs is None:
9387                         self.max_jobs = 1
9388                 self._dirty = True
9389
9390         def add(self, task):
9391                 self._task_queue.append(task)
9392                 self._dirty = True
9393
9394         def addFront(self, task):
9395                 self._task_queue.appendleft(task)
9396                 self._dirty = True
9397
9398         def schedule(self):
9399
9400                 if not self._dirty:
9401                         return False
9402
9403                 if not self:
9404                         return False
9405
9406                 if self._scheduling:
9407                         # Ignore any recursive schedule() calls triggered via
9408                         # self._task_exit().
9409                         return False
9410
9411                 self._scheduling = True
9412
9413                 task_queue = self._task_queue
9414                 running_tasks = self.running_tasks
9415                 max_jobs = self.max_jobs
9416                 state_changed = False
9417
9418                 while task_queue and \
9419                         (max_jobs is True or len(running_tasks) < max_jobs):
9420                         task = task_queue.popleft()
9421                         cancelled = getattr(task, "cancelled", None)
9422                         if not cancelled:
9423                                 running_tasks.add(task)
9424                                 task.addExitListener(self._task_exit)
9425                                 task.start()
9426                         state_changed = True
9427
9428                 self._dirty = False
9429                 self._scheduling = False
9430
9431                 return state_changed
9432
9433         def _task_exit(self, task):
9434                 """
9435                 Since we can always rely on exit listeners being called, the set of
9436                 running tasks is always pruned automatically and there is never any need
9437                 to actively prune it.
9438                 """
9439                 self.running_tasks.remove(task)
9440                 if self._task_queue:
9441                         self._dirty = True
9442
9443         def clear(self):
9444                 self._task_queue.clear()
9445                 running_tasks = self.running_tasks
9446                 while running_tasks:
9447                         task = running_tasks.pop()
9448                         task.removeExitListener(self._task_exit)
9449                         task.cancel()
9450                 self._dirty = False
9451
9452         def __nonzero__(self):
9453                 return bool(self._task_queue or self.running_tasks)
9454
9455         def __len__(self):
9456                 return len(self._task_queue) + len(self.running_tasks)
9457
9458 _can_poll_device = None
9459
9460 def can_poll_device():
9461         """
9462         Test if it's possible to use poll() on a device such as a pty. This
9463         is known to fail on Darwin.
9464         @rtype: bool
9465         @returns: True if poll() on a device succeeds, False otherwise.
9466         """
9467
9468         global _can_poll_device
9469         if _can_poll_device is not None:
9470                 return _can_poll_device
9471
9472         if not hasattr(select, "poll"):
9473                 _can_poll_device = False
9474                 return _can_poll_device
9475
9476         try:
9477                 dev_null = open('/dev/null', 'rb')
9478         except IOError:
9479                 _can_poll_device = False
9480                 return _can_poll_device
9481
9482         p = select.poll()
9483         p.register(dev_null.fileno(), PollConstants.POLLIN)
9484
9485         invalid_request = False
9486         for f, event in p.poll():
9487                 if event & PollConstants.POLLNVAL:
9488                         invalid_request = True
9489                         break
9490         dev_null.close()
9491
9492         _can_poll_device = not invalid_request
9493         return _can_poll_device
9494
9495 def create_poll_instance():
9496         """
9497         Create an instance of select.poll, or an instance of
9498         PollSelectAdapter there is no poll() implementation or
9499         it is broken somehow.
9500         """
9501         if can_poll_device():
9502                 return select.poll()
9503         return PollSelectAdapter()
9504
9505 getloadavg = getattr(os, "getloadavg", None)
9506 if getloadavg is None:
9507         def getloadavg():
9508                 """
9509                 Uses /proc/loadavg to emulate os.getloadavg().
9510                 Raises OSError if the load average was unobtainable.
9511                 """
9512                 try:
9513                         loadavg_str = open('/proc/loadavg').readline()
9514                 except IOError:
9515                         # getloadavg() is only supposed to raise OSError, so convert
9516                         raise OSError('unknown')
9517                 loadavg_split = loadavg_str.split()
9518                 if len(loadavg_split) < 3:
9519                         raise OSError('unknown')
9520                 loadavg_floats = []
9521                 for i in xrange(3):
9522                         try:
9523                                 loadavg_floats.append(float(loadavg_split[i]))
9524                         except ValueError:
9525                                 raise OSError('unknown')
9526                 return tuple(loadavg_floats)
9527
9528 class PollScheduler(object):
9529
9530         class _sched_iface_class(SlotObject):
9531                 __slots__ = ("register", "schedule", "unregister")
9532
9533         def __init__(self):
9534                 self._max_jobs = 1
9535                 self._max_load = None
9536                 self._jobs = 0
9537                 self._poll_event_queue = []
9538                 self._poll_event_handlers = {}
9539                 self._poll_event_handler_ids = {}
9540                 # Increment id for each new handler.
9541                 self._event_handler_id = 0
9542                 self._poll_obj = create_poll_instance()
9543                 self._scheduling = False
9544
9545         def _schedule(self):
9546                 """
9547                 Calls _schedule_tasks() and automatically returns early from
9548                 any recursive calls to this method that the _schedule_tasks()
9549                 call might trigger. This makes _schedule() safe to call from
9550                 inside exit listeners.
9551                 """
9552                 if self._scheduling:
9553                         return False
9554                 self._scheduling = True
9555                 try:
9556                         return self._schedule_tasks()
9557                 finally:
9558                         self._scheduling = False
9559
9560         def _running_job_count(self):
9561                 return self._jobs
9562
9563         def _can_add_job(self):
9564                 max_jobs = self._max_jobs
9565                 max_load = self._max_load
9566
9567                 if self._max_jobs is not True and \
9568                         self._running_job_count() >= self._max_jobs:
9569                         return False
9570
9571                 if max_load is not None and \
9572                         (max_jobs is True or max_jobs > 1) and \
9573                         self._running_job_count() >= 1:
9574                         try:
9575                                 avg1, avg5, avg15 = getloadavg()
9576                         except OSError:
9577                                 return False
9578
9579                         if avg1 >= max_load:
9580                                 return False
9581
9582                 return True
9583
9584         def _poll(self, timeout=None):
9585                 """
9586                 All poll() calls pass through here. The poll events
9587                 are added directly to self._poll_event_queue.
9588                 In order to avoid endless blocking, this raises
9589                 StopIteration if timeout is None and there are
9590                 no file descriptors to poll.
9591                 """
9592                 if not self._poll_event_handlers:
9593                         self._schedule()
9594                         if timeout is None and \
9595                                 not self._poll_event_handlers:
9596                                 raise StopIteration(
9597                                         "timeout is None and there are no poll() event handlers")
9598
9599                 # The following error is known to occur with Linux kernel versions
9600                 # less than 2.6.24:
9601                 #
9602                 #   select.error: (4, 'Interrupted system call')
9603                 #
9604                 # This error has been observed after a SIGSTOP, followed by SIGCONT.
9605                 # Treat it similar to EAGAIN if timeout is None, otherwise just return
9606                 # without any events.
9607                 while True:
9608                         try:
9609                                 self._poll_event_queue.extend(self._poll_obj.poll(timeout))
9610                                 break
9611                         except select.error, e:
9612                                 writemsg_level("\n!!! select error: %s\n" % (e,),
9613                                         level=logging.ERROR, noiselevel=-1)
9614                                 del e
9615                                 if timeout is not None:
9616                                         break
9617
9618         def _next_poll_event(self, timeout=None):
9619                 """
9620                 Since the _schedule_wait() loop is called by event
9621                 handlers from _poll_loop(), maintain a central event
9622                 queue for both of them to share events from a single
9623                 poll() call. In order to avoid endless blocking, this
9624                 raises StopIteration if timeout is None and there are
9625                 no file descriptors to poll.
9626                 """
9627                 if not self._poll_event_queue:
9628                         self._poll(timeout)
9629                 return self._poll_event_queue.pop()
9630
9631         def _poll_loop(self):
9632
9633                 event_handlers = self._poll_event_handlers
9634                 event_handled = False
9635
9636                 try:
9637                         while event_handlers:
9638                                 f, event = self._next_poll_event()
9639                                 handler, reg_id = event_handlers[f]
9640                                 handler(f, event)
9641                                 event_handled = True
9642                 except StopIteration:
9643                         event_handled = True
9644
9645                 if not event_handled:
9646                         raise AssertionError("tight loop")
9647
9648         def _schedule_yield(self):
9649                 """
9650                 Schedule for a short period of time chosen by the scheduler based
9651                 on internal state. Synchronous tasks should call this periodically
9652                 in order to allow the scheduler to service pending poll events. The
9653                 scheduler will call poll() exactly once, without blocking, and any
9654                 resulting poll events will be serviced.
9655                 """
9656                 event_handlers = self._poll_event_handlers
9657                 events_handled = 0
9658
9659                 if not event_handlers:
9660                         return bool(events_handled)
9661
9662                 if not self._poll_event_queue:
9663                         self._poll(0)
9664
9665                 try:
9666                         while event_handlers and self._poll_event_queue:
9667                                 f, event = self._next_poll_event()
9668                                 handler, reg_id = event_handlers[f]
9669                                 handler(f, event)
9670                                 events_handled += 1
9671                 except StopIteration:
9672                         events_handled += 1
9673
9674                 return bool(events_handled)
9675
9676         def _register(self, f, eventmask, handler):
9677                 """
9678                 @rtype: Integer
9679                 @return: A unique registration id, for use in schedule() or
9680                         unregister() calls.
9681                 """
9682                 if f in self._poll_event_handlers:
9683                         raise AssertionError("fd %d is already registered" % f)
9684                 self._event_handler_id += 1
9685                 reg_id = self._event_handler_id
9686                 self._poll_event_handler_ids[reg_id] = f
9687                 self._poll_event_handlers[f] = (handler, reg_id)
9688                 self._poll_obj.register(f, eventmask)
9689                 return reg_id
9690
9691         def _unregister(self, reg_id):
9692                 f = self._poll_event_handler_ids[reg_id]
9693                 self._poll_obj.unregister(f)
9694                 del self._poll_event_handlers[f]
9695                 del self._poll_event_handler_ids[reg_id]
9696
9697         def _schedule_wait(self, wait_ids):
9698                 """
9699                 Schedule until wait_id is not longer registered
9700                 for poll() events.
9701                 @type wait_id: int
9702                 @param wait_id: a task id to wait for
9703                 """
9704                 event_handlers = self._poll_event_handlers
9705                 handler_ids = self._poll_event_handler_ids
9706                 event_handled = False
9707
9708                 if isinstance(wait_ids, int):
9709                         wait_ids = frozenset([wait_ids])
9710
9711                 try:
9712                         while wait_ids.intersection(handler_ids):
9713                                 f, event = self._next_poll_event()
9714                                 handler, reg_id = event_handlers[f]
9715                                 handler(f, event)
9716                                 event_handled = True
9717                 except StopIteration:
9718                         event_handled = True
9719
9720                 return event_handled
9721
9722 class QueueScheduler(PollScheduler):
9723
9724         """
9725         Add instances of SequentialTaskQueue and then call run(). The
9726         run() method returns when no tasks remain.
9727         """
9728
9729         def __init__(self, max_jobs=None, max_load=None):
9730                 PollScheduler.__init__(self)
9731
9732                 if max_jobs is None:
9733                         max_jobs = 1
9734
9735                 self._max_jobs = max_jobs
9736                 self._max_load = max_load
9737                 self.sched_iface = self._sched_iface_class(
9738                         register=self._register,
9739                         schedule=self._schedule_wait,
9740                         unregister=self._unregister)
9741
9742                 self._queues = []
9743                 self._schedule_listeners = []
9744
9745         def add(self, q):
9746                 self._queues.append(q)
9747
9748         def remove(self, q):
9749                 self._queues.remove(q)
9750
9751         def run(self):
9752
9753                 while self._schedule():
9754                         self._poll_loop()
9755
9756                 while self._running_job_count():
9757                         self._poll_loop()
9758
9759         def _schedule_tasks(self):
9760                 """
9761                 @rtype: bool
9762                 @returns: True if there may be remaining tasks to schedule,
9763                         False otherwise.
9764                 """
9765                 while self._can_add_job():
9766                         n = self._max_jobs - self._running_job_count()
9767                         if n < 1:
9768                                 break
9769
9770                         if not self._start_next_job(n):
9771                                 return False
9772
9773                 for q in self._queues:
9774                         if q:
9775                                 return True
9776                 return False
9777
9778         def _running_job_count(self):
9779                 job_count = 0
9780                 for q in self._queues:
9781                         job_count += len(q.running_tasks)
9782                 self._jobs = job_count
9783                 return job_count
9784
9785         def _start_next_job(self, n=1):
9786                 started_count = 0
9787                 for q in self._queues:
9788                         initial_job_count = len(q.running_tasks)
9789                         q.schedule()
9790                         final_job_count = len(q.running_tasks)
9791                         if final_job_count > initial_job_count:
9792                                 started_count += (final_job_count - initial_job_count)
9793                         if started_count >= n:
9794                                 break
9795                 return started_count
9796
9797 class TaskScheduler(object):
9798
9799         """
9800         A simple way to handle scheduling of AsynchrousTask instances. Simply
9801         add tasks and call run(). The run() method returns when no tasks remain.
9802         """
9803
9804         def __init__(self, max_jobs=None, max_load=None):
9805                 self._queue = SequentialTaskQueue(max_jobs=max_jobs)
9806                 self._scheduler = QueueScheduler(
9807                         max_jobs=max_jobs, max_load=max_load)
9808                 self.sched_iface = self._scheduler.sched_iface
9809                 self.run = self._scheduler.run
9810                 self._scheduler.add(self._queue)
9811
9812         def add(self, task):
9813                 self._queue.add(task)
9814
9815 class JobStatusDisplay(object):
9816
9817         _bound_properties = ("curval", "failed", "running")
9818         _jobs_column_width = 48
9819
9820         # Don't update the display unless at least this much
9821         # time has passed, in units of seconds.
9822         _min_display_latency = 2
9823
9824         _default_term_codes = {
9825                 'cr'  : '\r',
9826                 'el'  : '\x1b[K',
9827                 'nel' : '\n',
9828         }
9829
9830         _termcap_name_map = {
9831                 'carriage_return' : 'cr',
9832                 'clr_eol'         : 'el',
9833                 'newline'         : 'nel',
9834         }
9835
9836         def __init__(self, out=sys.stdout, quiet=False):
9837                 object.__setattr__(self, "out", out)
9838                 object.__setattr__(self, "quiet", quiet)
9839                 object.__setattr__(self, "maxval", 0)
9840                 object.__setattr__(self, "merges", 0)
9841                 object.__setattr__(self, "_changed", False)
9842                 object.__setattr__(self, "_displayed", False)
9843                 object.__setattr__(self, "_last_display_time", 0)
9844                 object.__setattr__(self, "width", 80)
9845                 self.reset()
9846
9847                 isatty = hasattr(out, "isatty") and out.isatty()
9848                 object.__setattr__(self, "_isatty", isatty)
9849                 if not isatty or not self._init_term():
9850                         term_codes = {}
9851                         for k, capname in self._termcap_name_map.iteritems():
9852                                 term_codes[k] = self._default_term_codes[capname]
9853                         object.__setattr__(self, "_term_codes", term_codes)
9854                 encoding = sys.getdefaultencoding()
9855                 for k, v in self._term_codes.items():
9856                         if not isinstance(v, basestring):
9857                                 self._term_codes[k] = v.decode(encoding, 'replace')
9858
9859         def _init_term(self):
9860                 """
9861                 Initialize term control codes.
9862                 @rtype: bool
9863                 @returns: True if term codes were successfully initialized,
9864                         False otherwise.
9865                 """
9866
9867                 term_type = os.environ.get("TERM", "vt100")
9868                 tigetstr = None
9869
9870                 try:
9871                         import curses
9872                         try:
9873                                 curses.setupterm(term_type, self.out.fileno())
9874                                 tigetstr = curses.tigetstr
9875                         except curses.error:
9876                                 pass
9877                 except ImportError:
9878                         pass
9879
9880                 if tigetstr is None:
9881                         return False
9882
9883                 term_codes = {}
9884                 for k, capname in self._termcap_name_map.iteritems():
9885                         code = tigetstr(capname)
9886                         if code is None:
9887                                 code = self._default_term_codes[capname]
9888                         term_codes[k] = code
9889                 object.__setattr__(self, "_term_codes", term_codes)
9890                 return True
9891
9892         def _format_msg(self, msg):
9893                 return ">>> %s" % msg
9894
9895         def _erase(self):
9896                 self.out.write(
9897                         self._term_codes['carriage_return'] + \
9898                         self._term_codes['clr_eol'])
9899                 self.out.flush()
9900                 self._displayed = False
9901
9902         def _display(self, line):
9903                 self.out.write(line)
9904                 self.out.flush()
9905                 self._displayed = True
9906
9907         def _update(self, msg):
9908
9909                 out = self.out
9910                 if not self._isatty:
9911                         out.write(self._format_msg(msg) + self._term_codes['newline'])
9912                         self.out.flush()
9913                         self._displayed = True
9914                         return
9915
9916                 if self._displayed:
9917                         self._erase()
9918
9919                 self._display(self._format_msg(msg))
9920
9921         def displayMessage(self, msg):
9922
9923                 was_displayed = self._displayed
9924
9925                 if self._isatty and self._displayed:
9926                         self._erase()
9927
9928                 self.out.write(self._format_msg(msg) + self._term_codes['newline'])
9929                 self.out.flush()
9930                 self._displayed = False
9931
9932                 if was_displayed:
9933                         self._changed = True
9934                         self.display()
9935
9936         def reset(self):
9937                 self.maxval = 0
9938                 self.merges = 0
9939                 for name in self._bound_properties:
9940                         object.__setattr__(self, name, 0)
9941
9942                 if self._displayed:
9943                         self.out.write(self._term_codes['newline'])
9944                         self.out.flush()
9945                         self._displayed = False
9946
9947         def __setattr__(self, name, value):
9948                 old_value = getattr(self, name)
9949                 if value == old_value:
9950                         return
9951                 object.__setattr__(self, name, value)
9952                 if name in self._bound_properties:
9953                         self._property_change(name, old_value, value)
9954
9955         def _property_change(self, name, old_value, new_value):
9956                 self._changed = True
9957                 self.display()
9958
9959         def _load_avg_str(self):
9960                 try:
9961                         avg = getloadavg()
9962                 except OSError:
9963                         return 'unknown'
9964
9965                 max_avg = max(avg)
9966
9967                 if max_avg < 10:
9968                         digits = 2
9969                 elif max_avg < 100:
9970                         digits = 1
9971                 else:
9972                         digits = 0
9973
9974                 return ", ".join(("%%.%df" % digits ) % x for x in avg)
9975
9976         def display(self):
9977                 """
9978                 Display status on stdout, but only if something has
9979                 changed since the last call.
9980                 """
9981
9982                 if self.quiet:
9983                         return
9984
9985                 current_time = time.time()
9986                 time_delta = current_time - self._last_display_time
9987                 if self._displayed and \
9988                         not self._changed:
9989                         if not self._isatty:
9990                                 return
9991                         if time_delta < self._min_display_latency:
9992                                 return
9993
9994                 self._last_display_time = current_time
9995                 self._changed = False
9996                 self._display_status()
9997
9998         def _display_status(self):
9999                 # Don't use len(self._completed_tasks) here since that also
10000                 # can include uninstall tasks.
10001                 curval_str = str(self.curval)
10002                 maxval_str = str(self.maxval)
10003                 running_str = str(self.running)
10004                 failed_str = str(self.failed)
10005                 load_avg_str = self._load_avg_str()
10006
10007                 color_output = StringIO()
10008                 plain_output = StringIO()
10009                 style_file = portage.output.ConsoleStyleFile(color_output)
10010                 style_file.write_listener = plain_output
10011                 style_writer = portage.output.StyleWriter(file=style_file, maxcol=9999)
10012                 style_writer.style_listener = style_file.new_styles
10013                 f = formatter.AbstractFormatter(style_writer)
10014
10015                 number_style = "INFORM"
10016                 f.add_literal_data("Jobs: ")
10017                 f.push_style(number_style)
10018                 f.add_literal_data(curval_str)
10019                 f.pop_style()
10020                 f.add_literal_data(" of ")
10021                 f.push_style(number_style)
10022                 f.add_literal_data(maxval_str)
10023                 f.pop_style()
10024                 f.add_literal_data(" complete")
10025
10026                 if self.running:
10027                         f.add_literal_data(", ")
10028                         f.push_style(number_style)
10029                         f.add_literal_data(running_str)
10030                         f.pop_style()
10031                         f.add_literal_data(" running")
10032
10033                 if self.failed:
10034                         f.add_literal_data(", ")
10035                         f.push_style(number_style)
10036                         f.add_literal_data(failed_str)
10037                         f.pop_style()
10038                         f.add_literal_data(" failed")
10039
10040                 padding = self._jobs_column_width - len(plain_output.getvalue())
10041                 if padding > 0:
10042                         f.add_literal_data(padding * " ")
10043
10044                 f.add_literal_data("Load avg: ")
10045                 f.add_literal_data(load_avg_str)
10046
10047                 # Truncate to fit width, to avoid making the terminal scroll if the
10048                 # line overflows (happens when the load average is large).
10049                 plain_output = plain_output.getvalue()
10050                 if self._isatty and len(plain_output) > self.width:
10051                         # Use plain_output here since it's easier to truncate
10052                         # properly than the color output which contains console
10053                         # color codes.
10054                         self._update(plain_output[:self.width])
10055                 else:
10056                         self._update(color_output.getvalue())
10057
10058                 xtermTitle(" ".join(plain_output.split()))
10059
10060 class Scheduler(PollScheduler):
10061
10062         _opts_ignore_blockers = \
10063                 frozenset(["--buildpkgonly",
10064                 "--fetchonly", "--fetch-all-uri",
10065                 "--nodeps", "--pretend"])
10066
10067         _opts_no_background = \
10068                 frozenset(["--pretend",
10069                 "--fetchonly", "--fetch-all-uri"])
10070
10071         _opts_no_restart = frozenset(["--buildpkgonly",
10072                 "--fetchonly", "--fetch-all-uri", "--pretend"])
10073
10074         _bad_resume_opts = set(["--ask", "--changelog",
10075                 "--resume", "--skipfirst"])
10076
10077         _fetch_log = "/var/log/emerge-fetch.log"
10078
10079         class _iface_class(SlotObject):
10080                 __slots__ = ("dblinkEbuildPhase", "dblinkDisplayMerge",
10081                         "dblinkElog", "dblinkEmergeLog", "fetch", "register", "schedule",
10082                         "scheduleSetup", "scheduleUnpack", "scheduleYield",
10083                         "unregister")
10084
10085         class _fetch_iface_class(SlotObject):
10086                 __slots__ = ("log_file", "schedule")
10087
10088         _task_queues_class = slot_dict_class(
10089                 ("merge", "jobs", "fetch", "unpack"), prefix="")
10090
10091         class _build_opts_class(SlotObject):
10092                 __slots__ = ("buildpkg", "buildpkgonly",
10093                         "fetch_all_uri", "fetchonly", "pretend")
10094
10095         class _binpkg_opts_class(SlotObject):
10096                 __slots__ = ("fetchonly", "getbinpkg", "pretend")
10097
10098         class _pkg_count_class(SlotObject):
10099                 __slots__ = ("curval", "maxval")
10100
10101         class _emerge_log_class(SlotObject):
10102                 __slots__ = ("xterm_titles",)
10103
10104                 def log(self, *pargs, **kwargs):
10105                         if not self.xterm_titles:
10106                                 # Avoid interference with the scheduler's status display.
10107                                 kwargs.pop("short_msg", None)
10108                         emergelog(self.xterm_titles, *pargs, **kwargs)
10109
10110         class _failed_pkg(SlotObject):
10111                 __slots__ = ("build_dir", "build_log", "pkg", "returncode")
10112
10113         class _ConfigPool(object):
10114                 """Interface for a task to temporarily allocate a config
10115                 instance from a pool. This allows a task to be constructed
10116                 long before the config instance actually becomes needed, like
10117                 when prefetchers are constructed for the whole merge list."""
10118                 __slots__ = ("_root", "_allocate", "_deallocate")
10119                 def __init__(self, root, allocate, deallocate):
10120                         self._root = root
10121                         self._allocate = allocate
10122                         self._deallocate = deallocate
10123                 def allocate(self):
10124                         return self._allocate(self._root)
10125                 def deallocate(self, settings):
10126                         self._deallocate(settings)
10127
10128         class _unknown_internal_error(portage.exception.PortageException):
10129                 """
10130                 Used internally to terminate scheduling. The specific reason for
10131                 the failure should have been dumped to stderr.
10132                 """
10133                 def __init__(self, value=""):
10134                         portage.exception.PortageException.__init__(self, value)
10135
10136         def __init__(self, settings, trees, mtimedb, myopts,
10137                 spinner, mergelist, favorites, digraph):
10138                 PollScheduler.__init__(self)
10139                 self.settings = settings
10140                 self.target_root = settings["ROOT"]
10141                 self.trees = trees
10142                 self.myopts = myopts
10143                 self._spinner = spinner
10144                 self._mtimedb = mtimedb
10145                 self._mergelist = mergelist
10146                 self._favorites = favorites
10147                 self._args_set = InternalPackageSet(favorites)
10148                 self._build_opts = self._build_opts_class()
10149                 for k in self._build_opts.__slots__:
10150                         setattr(self._build_opts, k, "--" + k.replace("_", "-") in myopts)
10151                 self._binpkg_opts = self._binpkg_opts_class()
10152                 for k in self._binpkg_opts.__slots__:
10153                         setattr(self._binpkg_opts, k, "--" + k.replace("_", "-") in myopts)
10154
10155                 self.curval = 0
10156                 self._logger = self._emerge_log_class()
10157                 self._task_queues = self._task_queues_class()
10158                 for k in self._task_queues.allowed_keys:
10159                         setattr(self._task_queues, k,
10160                                 SequentialTaskQueue())
10161
10162                 # Holds merges that will wait to be executed when no builds are
10163                 # executing. This is useful for system packages since dependencies
10164                 # on system packages are frequently unspecified.
10165                 self._merge_wait_queue = []
10166                 # Holds merges that have been transfered from the merge_wait_queue to
10167                 # the actual merge queue. They are removed from this list upon
10168                 # completion. Other packages can start building only when this list is
10169                 # empty.
10170                 self._merge_wait_scheduled = []
10171
10172                 # Holds system packages and their deep runtime dependencies. Before
10173                 # being merged, these packages go to merge_wait_queue, to be merged
10174                 # when no other packages are building.
10175                 self._deep_system_deps = set()
10176
10177                 # Holds packages to merge which will satisfy currently unsatisfied
10178                 # deep runtime dependencies of system packages. If this is not empty
10179                 # then no parallel builds will be spawned until it is empty. This
10180                 # minimizes the possibility that a build will fail due to the system
10181                 # being in a fragile state. For example, see bug #259954.
10182                 self._unsatisfied_system_deps = set()
10183
10184                 self._status_display = JobStatusDisplay()
10185                 self._max_load = myopts.get("--load-average")
10186                 max_jobs = myopts.get("--jobs")
10187                 if max_jobs is None:
10188                         max_jobs = 1
10189                 self._set_max_jobs(max_jobs)
10190
10191                 # The root where the currently running
10192                 # portage instance is installed.
10193                 self._running_root = trees["/"]["root_config"]
10194                 self.edebug = 0
10195                 if settings.get("PORTAGE_DEBUG", "") == "1":
10196                         self.edebug = 1
10197                 self.pkgsettings = {}
10198                 self._config_pool = {}
10199                 self._blocker_db = {}
10200                 for root in trees:
10201                         self._config_pool[root] = []
10202                         self._blocker_db[root] = BlockerDB(trees[root]["root_config"])
10203
10204                 fetch_iface = self._fetch_iface_class(log_file=self._fetch_log,
10205                         schedule=self._schedule_fetch)
10206                 self._sched_iface = self._iface_class(
10207                         dblinkEbuildPhase=self._dblink_ebuild_phase,
10208                         dblinkDisplayMerge=self._dblink_display_merge,
10209                         dblinkElog=self._dblink_elog,
10210                         dblinkEmergeLog=self._dblink_emerge_log,
10211                         fetch=fetch_iface, register=self._register,
10212                         schedule=self._schedule_wait,
10213                         scheduleSetup=self._schedule_setup,
10214                         scheduleUnpack=self._schedule_unpack,
10215                         scheduleYield=self._schedule_yield,
10216                         unregister=self._unregister)
10217
10218                 self._prefetchers = weakref.WeakValueDictionary()
10219                 self._pkg_queue = []
10220                 self._completed_tasks = set()
10221
10222                 self._failed_pkgs = []
10223                 self._failed_pkgs_all = []
10224                 self._failed_pkgs_die_msgs = []
10225                 self._post_mod_echo_msgs = []
10226                 self._parallel_fetch = False
10227                 merge_count = len([x for x in mergelist \
10228                         if isinstance(x, Package) and x.operation == "merge"])
10229                 self._pkg_count = self._pkg_count_class(
10230                         curval=0, maxval=merge_count)
10231                 self._status_display.maxval = self._pkg_count.maxval
10232
10233                 # The load average takes some time to respond when new
10234                 # jobs are added, so we need to limit the rate of adding
10235                 # new jobs.
10236                 self._job_delay_max = 10
10237                 self._job_delay_factor = 1.0
10238                 self._job_delay_exp = 1.5
10239                 self._previous_job_start_time = None
10240
10241                 self._set_digraph(digraph)
10242
10243                 # This is used to memoize the _choose_pkg() result when
10244                 # no packages can be chosen until one of the existing
10245                 # jobs completes.
10246                 self._choose_pkg_return_early = False
10247
10248                 features = self.settings.features
10249                 if "parallel-fetch" in features and \
10250                         not ("--pretend" in self.myopts or \
10251                         "--fetch-all-uri" in self.myopts or \
10252                         "--fetchonly" in self.myopts):
10253                         if "distlocks" not in features:
10254                                 portage.writemsg(red("!!!")+"\n", noiselevel=-1)
10255                                 portage.writemsg(red("!!!")+" parallel-fetching " + \
10256                                         "requires the distlocks feature enabled"+"\n",
10257                                         noiselevel=-1)
10258                                 portage.writemsg(red("!!!")+" you have it disabled, " + \
10259                                         "thus parallel-fetching is being disabled"+"\n",
10260                                         noiselevel=-1)
10261                                 portage.writemsg(red("!!!")+"\n", noiselevel=-1)
10262                         elif len(mergelist) > 1:
10263                                 self._parallel_fetch = True
10264
10265                 if self._parallel_fetch:
10266                                 # clear out existing fetch log if it exists
10267                                 try:
10268                                         open(self._fetch_log, 'w')
10269                                 except EnvironmentError:
10270                                         pass
10271
10272                 self._running_portage = None
10273                 portage_match = self._running_root.trees["vartree"].dbapi.match(
10274                         portage.const.PORTAGE_PACKAGE_ATOM)
10275                 if portage_match:
10276                         cpv = portage_match.pop()
10277                         self._running_portage = self._pkg(cpv, "installed",
10278                                 self._running_root, installed=True)
10279
10280         def _poll(self, timeout=None):
10281                 self._schedule()
10282                 PollScheduler._poll(self, timeout=timeout)
10283
10284         def _set_max_jobs(self, max_jobs):
10285                 self._max_jobs = max_jobs
10286                 self._task_queues.jobs.max_jobs = max_jobs
10287
10288         def _background_mode(self):
10289                 """
10290                 Check if background mode is enabled and adjust states as necessary.
10291
10292                 @rtype: bool
10293                 @returns: True if background mode is enabled, False otherwise.
10294                 """
10295                 background = (self._max_jobs is True or \
10296                         self._max_jobs > 1 or "--quiet" in self.myopts) and \
10297                         not bool(self._opts_no_background.intersection(self.myopts))
10298
10299                 if background:
10300                         interactive_tasks = self._get_interactive_tasks()
10301                         if interactive_tasks:
10302                                 background = False
10303                                 writemsg_level(">>> Sending package output to stdio due " + \
10304                                         "to interactive package(s):\n",
10305                                         level=logging.INFO, noiselevel=-1)
10306                                 msg = [""]
10307                                 for pkg in interactive_tasks:
10308                                         pkg_str = "  " + colorize("INFORM", str(pkg.cpv))
10309                                         if pkg.root != "/":
10310                                                 pkg_str += " for " + pkg.root
10311                                         msg.append(pkg_str)
10312                                 msg.append("")
10313                                 writemsg_level("".join("%s\n" % (l,) for l in msg),
10314                                         level=logging.INFO, noiselevel=-1)
10315                                 if self._max_jobs is True or self._max_jobs > 1:
10316                                         self._set_max_jobs(1)
10317                                         writemsg_level(">>> Setting --jobs=1 due " + \
10318                                                 "to the above interactive package(s)\n",
10319                                                 level=logging.INFO, noiselevel=-1)
10320
10321                 self._status_display.quiet = \
10322                         not background or \
10323                         ("--quiet" in self.myopts and \
10324                         "--verbose" not in self.myopts)
10325
10326                 self._logger.xterm_titles = \
10327                         "notitles" not in self.settings.features and \
10328                         self._status_display.quiet
10329
10330                 return background
10331
10332         def _get_interactive_tasks(self):
10333                 from portage import flatten
10334                 from portage.dep import use_reduce, paren_reduce
10335                 interactive_tasks = []
10336                 for task in self._mergelist:
10337                         if not (isinstance(task, Package) and \
10338                                 task.operation == "merge"):
10339                                 continue
10340                         try:
10341                                 properties = flatten(use_reduce(paren_reduce(
10342                                         task.metadata["PROPERTIES"]), uselist=task.use.enabled))
10343                         except portage.exception.InvalidDependString, e:
10344                                 show_invalid_depstring_notice(task,
10345                                         task.metadata["PROPERTIES"], str(e))
10346                                 raise self._unknown_internal_error()
10347                         if "interactive" in properties:
10348                                 interactive_tasks.append(task)
10349                 return interactive_tasks
10350
10351         def _set_digraph(self, digraph):
10352                 if "--nodeps" in self.myopts or \
10353                         (self._max_jobs is not True and self._max_jobs < 2):
10354                         # save some memory
10355                         self._digraph = None
10356                         return
10357
10358                 self._digraph = digraph
10359                 self._find_system_deps()
10360                 self._prune_digraph()
10361                 self._prevent_builddir_collisions()
10362
10363         def _find_system_deps(self):
10364                 """
10365                 Find system packages and their deep runtime dependencies. Before being
10366                 merged, these packages go to merge_wait_queue, to be merged when no
10367                 other packages are building.
10368                 """
10369                 deep_system_deps = self._deep_system_deps
10370                 deep_system_deps.clear()
10371                 deep_system_deps.update(
10372                         _find_deep_system_runtime_deps(self._digraph))
10373                 deep_system_deps.difference_update([pkg for pkg in \
10374                         deep_system_deps if pkg.operation != "merge"])
10375
10376         def _prune_digraph(self):
10377                 """
10378                 Prune any root nodes that are irrelevant.
10379                 """
10380
10381                 graph = self._digraph
10382                 completed_tasks = self._completed_tasks
10383                 removed_nodes = set()
10384                 while True:
10385                         for node in graph.root_nodes():
10386                                 if not isinstance(node, Package) or \
10387                                         (node.installed and node.operation == "nomerge") or \
10388                                         node.onlydeps or \
10389                                         node in completed_tasks:
10390                                         removed_nodes.add(node)
10391                         if removed_nodes:
10392                                 graph.difference_update(removed_nodes)
10393                         if not removed_nodes:
10394                                 break
10395                         removed_nodes.clear()
10396
10397         def _prevent_builddir_collisions(self):
10398                 """
10399                 When building stages, sometimes the same exact cpv needs to be merged
10400                 to both $ROOTs. Add edges to the digraph in order to avoid collisions
10401                 in the builddir. Currently, normal file locks would be inappropriate
10402                 for this purpose since emerge holds all of it's build dir locks from
10403                 the main process.
10404                 """
10405                 cpv_map = {}
10406                 for pkg in self._mergelist:
10407                         if not isinstance(pkg, Package):
10408                                 # a satisfied blocker
10409                                 continue
10410                         if pkg.installed:
10411                                 continue
10412                         if pkg.cpv not in cpv_map:
10413                                 cpv_map[pkg.cpv] = [pkg]
10414                                 continue
10415                         for earlier_pkg in cpv_map[pkg.cpv]:
10416                                 self._digraph.add(earlier_pkg, pkg,
10417                                         priority=DepPriority(buildtime=True))
10418                         cpv_map[pkg.cpv].append(pkg)
10419
10420         class _pkg_failure(portage.exception.PortageException):
10421                 """
10422                 An instance of this class is raised by unmerge() when
10423                 an uninstallation fails.
10424                 """
10425                 status = 1
10426                 def __init__(self, *pargs):
10427                         portage.exception.PortageException.__init__(self, pargs)
10428                         if pargs:
10429                                 self.status = pargs[0]
10430
10431         def _schedule_fetch(self, fetcher):
10432                 """
10433                 Schedule a fetcher on the fetch queue, in order to
10434                 serialize access to the fetch log.
10435                 """
10436                 self._task_queues.fetch.addFront(fetcher)
10437
10438         def _schedule_setup(self, setup_phase):
10439                 """
10440                 Schedule a setup phase on the merge queue, in order to
10441                 serialize unsandboxed access to the live filesystem.
10442                 """
10443                 self._task_queues.merge.addFront(setup_phase)
10444                 self._schedule()
10445
10446         def _schedule_unpack(self, unpack_phase):
10447                 """
10448                 Schedule an unpack phase on the unpack queue, in order
10449                 to serialize $DISTDIR access for live ebuilds.
10450                 """
10451                 self._task_queues.unpack.add(unpack_phase)
10452
10453         def _find_blockers(self, new_pkg):
10454                 """
10455                 Returns a callable which should be called only when
10456                 the vdb lock has been acquired.
10457                 """
10458                 def get_blockers():
10459                         return self._find_blockers_with_lock(new_pkg, acquire_lock=0)
10460                 return get_blockers
10461
10462         def _find_blockers_with_lock(self, new_pkg, acquire_lock=0):
10463                 if self._opts_ignore_blockers.intersection(self.myopts):
10464                         return None
10465
10466                 # Call gc.collect() here to avoid heap overflow that
10467                 # triggers 'Cannot allocate memory' errors (reported
10468                 # with python-2.5).
10469                 import gc
10470                 gc.collect()
10471
10472                 blocker_db = self._blocker_db[new_pkg.root]
10473
10474                 blocker_dblinks = []
10475                 for blocking_pkg in blocker_db.findInstalledBlockers(
10476                         new_pkg, acquire_lock=acquire_lock):
10477                         if new_pkg.slot_atom == blocking_pkg.slot_atom:
10478                                 continue
10479                         if new_pkg.cpv == blocking_pkg.cpv:
10480                                 continue
10481                         blocker_dblinks.append(portage.dblink(
10482                                 blocking_pkg.category, blocking_pkg.pf, blocking_pkg.root,
10483                                 self.pkgsettings[blocking_pkg.root], treetype="vartree",
10484                                 vartree=self.trees[blocking_pkg.root]["vartree"]))
10485
10486                 gc.collect()
10487
10488                 return blocker_dblinks
10489
10490         def _dblink_pkg(self, pkg_dblink):
10491                 cpv = pkg_dblink.mycpv
10492                 type_name = RootConfig.tree_pkg_map[pkg_dblink.treetype]
10493                 root_config = self.trees[pkg_dblink.myroot]["root_config"]
10494                 installed = type_name == "installed"
10495                 return self._pkg(cpv, type_name, root_config, installed=installed)
10496
10497         def _append_to_log_path(self, log_path, msg):
10498                 f = open(log_path, 'a')
10499                 try:
10500                         f.write(msg)
10501                 finally:
10502                         f.close()
10503
10504         def _dblink_elog(self, pkg_dblink, phase, func, msgs):
10505
10506                 log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
10507                 log_file = None
10508                 out = sys.stdout
10509                 background = self._background
10510
10511                 if background and log_path is not None:
10512                         log_file = open(log_path, 'a')
10513                         out = log_file
10514
10515                 try:
10516                         for msg in msgs:
10517                                 func(msg, phase=phase, key=pkg_dblink.mycpv, out=out)
10518                 finally:
10519                         if log_file is not None:
10520                                 log_file.close()
10521
10522         def _dblink_emerge_log(self, msg):
10523                 self._logger.log(msg)
10524
10525         def _dblink_display_merge(self, pkg_dblink, msg, level=0, noiselevel=0):
10526                 log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
10527                 background = self._background
10528
10529                 if log_path is None:
10530                         if not (background and level < logging.WARN):
10531                                 portage.util.writemsg_level(msg,
10532                                         level=level, noiselevel=noiselevel)
10533                 else:
10534                         if not background:
10535                                 portage.util.writemsg_level(msg,
10536                                         level=level, noiselevel=noiselevel)
10537                         self._append_to_log_path(log_path, msg)
10538
10539         def _dblink_ebuild_phase(self,
10540                 pkg_dblink, pkg_dbapi, ebuild_path, phase):
10541                 """
10542                 Using this callback for merge phases allows the scheduler
10543                 to run while these phases execute asynchronously, and allows
10544                 the scheduler control output handling.
10545                 """
10546
10547                 scheduler = self._sched_iface
10548                 settings = pkg_dblink.settings
10549                 pkg = self._dblink_pkg(pkg_dblink)
10550                 background = self._background
10551                 log_path = settings.get("PORTAGE_LOG_FILE")
10552
10553                 ebuild_phase = EbuildPhase(background=background,
10554                         pkg=pkg, phase=phase, scheduler=scheduler,
10555                         settings=settings, tree=pkg_dblink.treetype)
10556                 ebuild_phase.start()
10557                 ebuild_phase.wait()
10558
10559                 return ebuild_phase.returncode
10560
10561         def _generate_digests(self):
10562                 """
10563                 Generate digests if necessary for --digests or FEATURES=digest.
10564                 In order to avoid interference, this must done before parallel
10565                 tasks are started.
10566                 """
10567
10568                 if '--fetchonly' in self.myopts:
10569                         return os.EX_OK
10570
10571                 digest = '--digest' in self.myopts
10572                 if not digest:
10573                         for pkgsettings in self.pkgsettings.itervalues():
10574                                 if 'digest' in pkgsettings.features:
10575                                         digest = True
10576                                         break
10577
10578                 if not digest:
10579                         return os.EX_OK
10580
10581                 for x in self._mergelist:
10582                         if not isinstance(x, Package) or \
10583                                 x.type_name != 'ebuild' or \
10584                                 x.operation != 'merge':
10585                                 continue
10586                         pkgsettings = self.pkgsettings[x.root]
10587                         if '--digest' not in self.myopts and \
10588                                 'digest' not in pkgsettings.features:
10589                                 continue
10590                         portdb = x.root_config.trees['porttree'].dbapi
10591                         ebuild_path = portdb.findname(x.cpv)
10592                         if not ebuild_path:
10593                                 writemsg_level(
10594                                         "!!! Could not locate ebuild for '%s'.\n" \
10595                                         % x.cpv, level=logging.ERROR, noiselevel=-1)
10596                                 return 1
10597                         pkgsettings['O'] = os.path.dirname(ebuild_path)
10598                         if not portage.digestgen([], pkgsettings, myportdb=portdb):
10599                                 writemsg_level(
10600                                         "!!! Unable to generate manifest for '%s'.\n" \
10601                                         % x.cpv, level=logging.ERROR, noiselevel=-1)
10602                                 return 1
10603
10604                 return os.EX_OK
10605
10606         def _check_manifests(self):
10607                 # Verify all the manifests now so that the user is notified of failure
10608                 # as soon as possible.
10609                 if "strict" not in self.settings.features or \
10610                         "--fetchonly" in self.myopts or \
10611                         "--fetch-all-uri" in self.myopts:
10612                         return os.EX_OK
10613
10614                 shown_verifying_msg = False
10615                 quiet_settings = {}
10616                 for myroot, pkgsettings in self.pkgsettings.iteritems():
10617                         quiet_config = portage.config(clone=pkgsettings)
10618                         quiet_config["PORTAGE_QUIET"] = "1"
10619                         quiet_config.backup_changes("PORTAGE_QUIET")
10620                         quiet_settings[myroot] = quiet_config
10621                         del quiet_config
10622
10623                 for x in self._mergelist:
10624                         if not isinstance(x, Package) or \
10625                                 x.type_name != "ebuild":
10626                                 continue
10627
10628                         if not shown_verifying_msg:
10629                                 shown_verifying_msg = True
10630                                 self._status_msg("Verifying ebuild manifests")
10631
10632                         root_config = x.root_config
10633                         portdb = root_config.trees["porttree"].dbapi
10634                         quiet_config = quiet_settings[root_config.root]
10635                         quiet_config["O"] = os.path.dirname(portdb.findname(x.cpv))
10636                         if not portage.digestcheck([], quiet_config, strict=True):
10637                                 return 1
10638
10639                 return os.EX_OK
10640
10641         def _add_prefetchers(self):
10642
10643                 if not self._parallel_fetch:
10644                         return
10645
10646                 if self._parallel_fetch:
10647                         self._status_msg("Starting parallel fetch")
10648
10649                         prefetchers = self._prefetchers
10650                         getbinpkg = "--getbinpkg" in self.myopts
10651
10652                         # In order to avoid "waiting for lock" messages
10653                         # at the beginning, which annoy users, never
10654                         # spawn a prefetcher for the first package.
10655                         for pkg in self._mergelist[1:]:
10656                                 prefetcher = self._create_prefetcher(pkg)
10657                                 if prefetcher is not None:
10658                                         self._task_queues.fetch.add(prefetcher)
10659                                         prefetchers[pkg] = prefetcher
10660
10661         def _create_prefetcher(self, pkg):
10662                 """
10663                 @return: a prefetcher, or None if not applicable
10664                 """
10665                 prefetcher = None
10666
10667                 if not isinstance(pkg, Package):
10668                         pass
10669
10670                 elif pkg.type_name == "ebuild":
10671
10672                         prefetcher = EbuildFetcher(background=True,
10673                                 config_pool=self._ConfigPool(pkg.root,
10674                                 self._allocate_config, self._deallocate_config),
10675                                 fetchonly=1, logfile=self._fetch_log,
10676                                 pkg=pkg, prefetch=True, scheduler=self._sched_iface)
10677
10678                 elif pkg.type_name == "binary" and \
10679                         "--getbinpkg" in self.myopts and \
10680                         pkg.root_config.trees["bintree"].isremote(pkg.cpv):
10681
10682                         prefetcher = BinpkgPrefetcher(background=True,
10683                                 pkg=pkg, scheduler=self._sched_iface)
10684
10685                 return prefetcher
10686
10687         def _is_restart_scheduled(self):
10688                 """
10689                 Check if the merge list contains a replacement
10690                 for the current running instance, that will result
10691                 in restart after merge.
10692                 @rtype: bool
10693                 @returns: True if a restart is scheduled, False otherwise.
10694                 """
10695                 if self._opts_no_restart.intersection(self.myopts):
10696                         return False
10697
10698                 mergelist = self._mergelist
10699
10700                 for i, pkg in enumerate(mergelist):
10701                         if self._is_restart_necessary(pkg) and \
10702                                 i != len(mergelist) - 1:
10703                                 return True
10704
10705                 return False
10706
10707         def _is_restart_necessary(self, pkg):
10708                 """
10709                 @return: True if merging the given package
10710                         requires restart, False otherwise.
10711                 """
10712
10713                 # Figure out if we need a restart.
10714                 if pkg.root == self._running_root.root and \
10715                         portage.match_from_list(
10716                         portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
10717                         if self._running_portage:
10718                                 return pkg.cpv != self._running_portage.cpv
10719                         return True
10720                 return False
10721
10722         def _restart_if_necessary(self, pkg):
10723                 """
10724                 Use execv() to restart emerge. This happens
10725                 if portage upgrades itself and there are
10726                 remaining packages in the list.
10727                 """
10728
10729                 if self._opts_no_restart.intersection(self.myopts):
10730                         return
10731
10732                 if not self._is_restart_necessary(pkg):
10733                         return
10734
10735                 if pkg == self._mergelist[-1]:
10736                         return
10737
10738                 self._main_loop_cleanup()
10739
10740                 logger = self._logger
10741                 pkg_count = self._pkg_count
10742                 mtimedb = self._mtimedb
10743                 bad_resume_opts = self._bad_resume_opts
10744
10745                 logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
10746                         (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
10747
10748                 logger.log(" *** RESTARTING " + \
10749                         "emerge via exec() after change of " + \
10750                         "portage version.")
10751
10752                 mtimedb["resume"]["mergelist"].remove(list(pkg))
10753                 mtimedb.commit()
10754                 portage.run_exitfuncs()
10755                 mynewargv = [sys.argv[0], "--resume"]
10756                 resume_opts = self.myopts.copy()
10757                 # For automatic resume, we need to prevent
10758                 # any of bad_resume_opts from leaking in
10759                 # via EMERGE_DEFAULT_OPTS.
10760                 resume_opts["--ignore-default-opts"] = True
10761                 for myopt, myarg in resume_opts.iteritems():
10762                         if myopt not in bad_resume_opts:
10763                                 if myarg is True:
10764                                         mynewargv.append(myopt)
10765                                 else:
10766                                         mynewargv.append(myopt +"="+ str(myarg))
10767                 # priority only needs to be adjusted on the first run
10768                 os.environ["PORTAGE_NICENESS"] = "0"
10769                 os.execv(mynewargv[0], mynewargv)
10770
10771         def merge(self):
10772
10773                 if "--resume" in self.myopts:
10774                         # We're resuming.
10775                         portage.writemsg_stdout(
10776                                 colorize("GOOD", "*** Resuming merge...\n"), noiselevel=-1)
10777                         self._logger.log(" *** Resuming merge...")
10778
10779                 self._save_resume_list()
10780
10781                 try:
10782                         self._background = self._background_mode()
10783                 except self._unknown_internal_error:
10784                         return 1
10785
10786                 for root in self.trees:
10787                         root_config = self.trees[root]["root_config"]
10788
10789                         # Even for --pretend --fetch mode, PORTAGE_TMPDIR is required
10790                         # since it might spawn pkg_nofetch which requires PORTAGE_BUILDDIR
10791                         # for ensuring sane $PWD (bug #239560) and storing elog messages.
10792                         tmpdir = root_config.settings.get("PORTAGE_TMPDIR", "")
10793                         if not tmpdir or not os.path.isdir(tmpdir):
10794                                 msg = "The directory specified in your " + \
10795                                         "PORTAGE_TMPDIR variable, '%s', " % tmpdir + \
10796                                 "does not exist. Please create this " + \
10797                                 "directory or correct your PORTAGE_TMPDIR setting."
10798                                 msg = textwrap.wrap(msg, 70)
10799                                 out = portage.output.EOutput()
10800                                 for l in msg:
10801                                         out.eerror(l)
10802                                 return 1
10803
10804                         if self._background:
10805                                 root_config.settings.unlock()
10806                                 root_config.settings["PORTAGE_BACKGROUND"] = "1"
10807                                 root_config.settings.backup_changes("PORTAGE_BACKGROUND")
10808                                 root_config.settings.lock()
10809
10810                         self.pkgsettings[root] = portage.config(
10811                                 clone=root_config.settings)
10812
10813                 rval = self._generate_digests()
10814                 if rval != os.EX_OK:
10815                         return rval
10816
10817                 rval = self._check_manifests()
10818                 if rval != os.EX_OK:
10819                         return rval
10820
10821                 keep_going = "--keep-going" in self.myopts
10822                 fetchonly = self._build_opts.fetchonly
10823                 mtimedb = self._mtimedb
10824                 failed_pkgs = self._failed_pkgs
10825
10826                 while True:
10827                         rval = self._merge()
10828                         if rval == os.EX_OK or fetchonly or not keep_going:
10829                                 break
10830                         if "resume" not in mtimedb:
10831                                 break
10832                         mergelist = self._mtimedb["resume"].get("mergelist")
10833                         if not mergelist:
10834                                 break
10835
10836                         if not failed_pkgs:
10837                                 break
10838
10839                         for failed_pkg in failed_pkgs:
10840                                 mergelist.remove(list(failed_pkg.pkg))
10841
10842                         self._failed_pkgs_all.extend(failed_pkgs)
10843                         del failed_pkgs[:]
10844
10845                         if not mergelist:
10846                                 break
10847
10848                         if not self._calc_resume_list():
10849                                 break
10850
10851                         clear_caches(self.trees)
10852                         if not self._mergelist:
10853                                 break
10854
10855                         self._save_resume_list()
10856                         self._pkg_count.curval = 0
10857                         self._pkg_count.maxval = len([x for x in self._mergelist \
10858                                 if isinstance(x, Package) and x.operation == "merge"])
10859                         self._status_display.maxval = self._pkg_count.maxval
10860
10861                 self._logger.log(" *** Finished. Cleaning up...")
10862
10863                 if failed_pkgs:
10864                         self._failed_pkgs_all.extend(failed_pkgs)
10865                         del failed_pkgs[:]
10866
10867                 background = self._background
10868                 failure_log_shown = False
10869                 if background and len(self._failed_pkgs_all) == 1:
10870                         # If only one package failed then just show it's
10871                         # whole log for easy viewing.
10872                         failed_pkg = self._failed_pkgs_all[-1]
10873                         build_dir = failed_pkg.build_dir
10874                         log_file = None
10875
10876                         log_paths = [failed_pkg.build_log]
10877
10878                         log_path = self._locate_failure_log(failed_pkg)
10879                         if log_path is not None:
10880                                 try:
10881                                         log_file = open(log_path)
10882                                 except IOError:
10883                                         pass
10884
10885                         if log_file is not None:
10886                                 try:
10887                                         for line in log_file:
10888                                                 writemsg_level(line, noiselevel=-1)
10889                                 finally:
10890                                         log_file.close()
10891                                 failure_log_shown = True
10892
10893                 # Dump mod_echo output now since it tends to flood the terminal.
10894                 # This allows us to avoid having more important output, generated
10895                 # later, from being swept away by the mod_echo output.
10896                 mod_echo_output =  _flush_elog_mod_echo()
10897
10898                 if background and not failure_log_shown and \
10899                         self._failed_pkgs_all and \
10900                         self._failed_pkgs_die_msgs and \
10901                         not mod_echo_output:
10902
10903                         printer = portage.output.EOutput()
10904                         for mysettings, key, logentries in self._failed_pkgs_die_msgs:
10905                                 root_msg = ""
10906                                 if mysettings["ROOT"] != "/":
10907                                         root_msg = " merged to %s" % mysettings["ROOT"]
10908                                 print
10909                                 printer.einfo("Error messages for package %s%s:" % \
10910                                         (colorize("INFORM", key), root_msg))
10911                                 print
10912                                 for phase in portage.const.EBUILD_PHASES:
10913                                         if phase not in logentries:
10914                                                 continue
10915                                         for msgtype, msgcontent in logentries[phase]:
10916                                                 if isinstance(msgcontent, basestring):
10917                                                         msgcontent = [msgcontent]
10918                                                 for line in msgcontent:
10919                                                         printer.eerror(line.strip("\n"))
10920
10921                 if self._post_mod_echo_msgs:
10922                         for msg in self._post_mod_echo_msgs:
10923                                 msg()
10924
10925                 if len(self._failed_pkgs_all) > 1 or \
10926                         (self._failed_pkgs_all and "--keep-going" in self.myopts):
10927                         if len(self._failed_pkgs_all) > 1:
10928                                 msg = "The following %d packages have " % \
10929                                         len(self._failed_pkgs_all) + \
10930                                         "failed to build or install:"
10931                         else:
10932                                 msg = "The following package has " + \
10933                                         "failed to build or install:"
10934                         prefix = bad(" * ")
10935                         writemsg(prefix + "\n", noiselevel=-1)
10936                         from textwrap import wrap
10937                         for line in wrap(msg, 72):
10938                                 writemsg("%s%s\n" % (prefix, line), noiselevel=-1)
10939                         writemsg(prefix + "\n", noiselevel=-1)
10940                         for failed_pkg in self._failed_pkgs_all:
10941                                 writemsg("%s\t%s\n" % (prefix,
10942                                         colorize("INFORM", str(failed_pkg.pkg))),
10943                                         noiselevel=-1)
10944                         writemsg(prefix + "\n", noiselevel=-1)
10945
10946                 return rval
10947
10948         def _elog_listener(self, mysettings, key, logentries, fulltext):
10949                 errors = portage.elog.filter_loglevels(logentries, ["ERROR"])
10950                 if errors:
10951                         self._failed_pkgs_die_msgs.append(
10952                                 (mysettings, key, errors))
10953
10954         def _locate_failure_log(self, failed_pkg):
10955
10956                 build_dir = failed_pkg.build_dir
10957                 log_file = None
10958
10959                 log_paths = [failed_pkg.build_log]
10960
10961                 for log_path in log_paths:
10962                         if not log_path:
10963                                 continue
10964
10965                         try:
10966                                 log_size = os.stat(log_path).st_size
10967                         except OSError:
10968                                 continue
10969
10970                         if log_size == 0:
10971                                 continue
10972
10973                         return log_path
10974
10975                 return None
10976
10977         def _add_packages(self):
10978                 pkg_queue = self._pkg_queue
10979                 for pkg in self._mergelist:
10980                         if isinstance(pkg, Package):
10981                                 pkg_queue.append(pkg)
10982                         elif isinstance(pkg, Blocker):
10983                                 pass
10984
10985         def _system_merge_started(self, merge):
10986                 """
10987                 Add any unsatisfied runtime deps to self._unsatisfied_system_deps.
10988                 """
10989                 graph = self._digraph
10990                 if graph is None:
10991                         return
10992                 pkg = merge.merge.pkg
10993
10994                 # Skip this if $ROOT != / since it shouldn't matter if there
10995                 # are unsatisfied system runtime deps in this case.
10996                 if pkg.root != '/':
10997                         return
10998
10999                 completed_tasks = self._completed_tasks
11000                 unsatisfied = self._unsatisfied_system_deps
11001
11002                 def ignore_non_runtime_or_satisfied(priority):
11003                         """
11004                         Ignore non-runtime and satisfied runtime priorities.
11005                         """
11006                         if isinstance(priority, DepPriority) and \
11007                                 not priority.satisfied and \
11008                                 (priority.runtime or priority.runtime_post):
11009                                 return False
11010                         return True
11011
11012                 # When checking for unsatisfied runtime deps, only check
11013                 # direct deps since indirect deps are checked when the
11014                 # corresponding parent is merged.
11015                 for child in graph.child_nodes(pkg,
11016                         ignore_priority=ignore_non_runtime_or_satisfied):
11017                         if not isinstance(child, Package) or \
11018                                 child.operation == 'uninstall':
11019                                 continue
11020                         if child is pkg:
11021                                 continue
11022                         if child.operation == 'merge' and \
11023                                 child not in completed_tasks:
11024                                 unsatisfied.add(child)
11025
11026         def _merge_wait_exit_handler(self, task):
11027                 self._merge_wait_scheduled.remove(task)
11028                 self._merge_exit(task)
11029
11030         def _merge_exit(self, merge):
11031                 self._do_merge_exit(merge)
11032                 self._deallocate_config(merge.merge.settings)
11033                 if merge.returncode == os.EX_OK and \
11034                         not merge.merge.pkg.installed:
11035                         self._status_display.curval += 1
11036                 self._status_display.merges = len(self._task_queues.merge)
11037                 self._schedule()
11038
11039         def _do_merge_exit(self, merge):
11040                 pkg = merge.merge.pkg
11041                 if merge.returncode != os.EX_OK:
11042                         settings = merge.merge.settings
11043                         build_dir = settings.get("PORTAGE_BUILDDIR")
11044                         build_log = settings.get("PORTAGE_LOG_FILE")
11045
11046                         self._failed_pkgs.append(self._failed_pkg(
11047                                 build_dir=build_dir, build_log=build_log,
11048                                 pkg=pkg,
11049                                 returncode=merge.returncode))
11050                         self._failed_pkg_msg(self._failed_pkgs[-1], "install", "to")
11051
11052                         self._status_display.failed = len(self._failed_pkgs)
11053                         return
11054
11055                 self._task_complete(pkg)
11056                 pkg_to_replace = merge.merge.pkg_to_replace
11057                 if pkg_to_replace is not None:
11058                         # When a package is replaced, mark it's uninstall
11059                         # task complete (if any).
11060                         uninst_hash_key = \
11061                                 ("installed", pkg.root, pkg_to_replace.cpv, "uninstall")
11062                         self._task_complete(uninst_hash_key)
11063
11064                 if pkg.installed:
11065                         return
11066
11067                 self._restart_if_necessary(pkg)
11068
11069                 # Call mtimedb.commit() after each merge so that
11070                 # --resume still works after being interrupted
11071                 # by reboot, sigkill or similar.
11072                 mtimedb = self._mtimedb
11073                 mtimedb["resume"]["mergelist"].remove(list(pkg))
11074                 if not mtimedb["resume"]["mergelist"]:
11075                         del mtimedb["resume"]
11076                 mtimedb.commit()
11077
11078         def _build_exit(self, build):
11079                 if build.returncode == os.EX_OK:
11080                         self.curval += 1
11081                         merge = PackageMerge(merge=build)
11082                         if not build.build_opts.buildpkgonly and \
11083                                 build.pkg in self._deep_system_deps:
11084                                 # Since dependencies on system packages are frequently
11085                                 # unspecified, merge them only when no builds are executing.
11086                                 self._merge_wait_queue.append(merge)
11087                                 merge.addStartListener(self._system_merge_started)
11088                         else:
11089                                 merge.addExitListener(self._merge_exit)
11090                                 self._task_queues.merge.add(merge)
11091                                 self._status_display.merges = len(self._task_queues.merge)
11092                 else:
11093                         settings = build.settings
11094                         build_dir = settings.get("PORTAGE_BUILDDIR")
11095                         build_log = settings.get("PORTAGE_LOG_FILE")
11096
11097                         self._failed_pkgs.append(self._failed_pkg(
11098                                 build_dir=build_dir, build_log=build_log,
11099                                 pkg=build.pkg,
11100                                 returncode=build.returncode))
11101                         self._failed_pkg_msg(self._failed_pkgs[-1], "emerge", "for")
11102
11103                         self._status_display.failed = len(self._failed_pkgs)
11104                         self._deallocate_config(build.settings)
11105                 self._jobs -= 1
11106                 self._status_display.running = self._jobs
11107                 self._schedule()
11108
11109         def _extract_exit(self, build):
11110                 self._build_exit(build)
11111
11112         def _task_complete(self, pkg):
11113                 self._completed_tasks.add(pkg)
11114                 self._unsatisfied_system_deps.discard(pkg)
11115                 self._choose_pkg_return_early = False
11116
11117         def _merge(self):
11118
11119                 self._add_prefetchers()
11120                 self._add_packages()
11121                 pkg_queue = self._pkg_queue
11122                 failed_pkgs = self._failed_pkgs
11123                 portage.locks._quiet = self._background
11124                 portage.elog._emerge_elog_listener = self._elog_listener
11125                 rval = os.EX_OK
11126
11127                 try:
11128                         self._main_loop()
11129                 finally:
11130                         self._main_loop_cleanup()
11131                         portage.locks._quiet = False
11132                         portage.elog._emerge_elog_listener = None
11133                         if failed_pkgs:
11134                                 rval = failed_pkgs[-1].returncode
11135
11136                 return rval
11137
11138         def _main_loop_cleanup(self):
11139                 del self._pkg_queue[:]
11140                 self._completed_tasks.clear()
11141                 self._deep_system_deps.clear()
11142                 self._unsatisfied_system_deps.clear()
11143                 self._choose_pkg_return_early = False
11144                 self._status_display.reset()
11145                 self._digraph = None
11146                 self._task_queues.fetch.clear()
11147
11148         def _choose_pkg(self):
11149                 """
11150                 Choose a task that has all it's dependencies satisfied.
11151                 """
11152
11153                 if self._choose_pkg_return_early:
11154                         return None
11155
11156                 if self._digraph is None:
11157                         if (self._jobs or self._task_queues.merge) and \
11158                                 not ("--nodeps" in self.myopts and \
11159                                 (self._max_jobs is True or self._max_jobs > 1)):
11160                                 self._choose_pkg_return_early = True
11161                                 return None
11162                         return self._pkg_queue.pop(0)
11163
11164                 if not (self._jobs or self._task_queues.merge):
11165                         return self._pkg_queue.pop(0)
11166
11167                 self._prune_digraph()
11168
11169                 chosen_pkg = None
11170                 later = set(self._pkg_queue)
11171                 for pkg in self._pkg_queue:
11172                         later.remove(pkg)
11173                         if not self._dependent_on_scheduled_merges(pkg, later):
11174                                 chosen_pkg = pkg
11175                                 break
11176
11177                 if chosen_pkg is not None:
11178                         self._pkg_queue.remove(chosen_pkg)
11179
11180                 if chosen_pkg is None:
11181                         # There's no point in searching for a package to
11182                         # choose until at least one of the existing jobs
11183                         # completes.
11184                         self._choose_pkg_return_early = True
11185
11186                 return chosen_pkg
11187
11188         def _dependent_on_scheduled_merges(self, pkg, later):
11189                 """
11190                 Traverse the subgraph of the given packages deep dependencies
11191                 to see if it contains any scheduled merges.
11192                 @param pkg: a package to check dependencies for
11193                 @type pkg: Package
11194                 @param later: packages for which dependence should be ignored
11195                         since they will be merged later than pkg anyway and therefore
11196                         delaying the merge of pkg will not result in a more optimal
11197                         merge order
11198                 @type later: set
11199                 @rtype: bool
11200                 @returns: True if the package is dependent, False otherwise.
11201                 """
11202
11203                 graph = self._digraph
11204                 completed_tasks = self._completed_tasks
11205
11206                 dependent = False
11207                 traversed_nodes = set([pkg])
11208                 direct_deps = graph.child_nodes(pkg)
11209                 node_stack = direct_deps
11210                 direct_deps = frozenset(direct_deps)
11211                 while node_stack:
11212                         node = node_stack.pop()
11213                         if node in traversed_nodes:
11214                                 continue
11215                         traversed_nodes.add(node)
11216                         if not ((node.installed and node.operation == "nomerge") or \
11217                                 (node.operation == "uninstall" and \
11218                                 node not in direct_deps) or \
11219                                 node in completed_tasks or \
11220                                 node in later):
11221                                 dependent = True
11222                                 break
11223                         node_stack.extend(graph.child_nodes(node))
11224
11225                 return dependent
11226
11227         def _allocate_config(self, root):
11228                 """
11229                 Allocate a unique config instance for a task in order
11230                 to prevent interference between parallel tasks.
11231                 """
11232                 if self._config_pool[root]:
11233                         temp_settings = self._config_pool[root].pop()
11234                 else:
11235                         temp_settings = portage.config(clone=self.pkgsettings[root])
11236                 # Since config.setcpv() isn't guaranteed to call config.reset() due to
11237                 # performance reasons, call it here to make sure all settings from the
11238                 # previous package get flushed out (such as PORTAGE_LOG_FILE).
11239                 temp_settings.reload()
11240                 temp_settings.reset()
11241                 return temp_settings
11242
11243         def _deallocate_config(self, settings):
11244                 self._config_pool[settings["ROOT"]].append(settings)
11245
11246         def _main_loop(self):
11247
11248                 # Only allow 1 job max if a restart is scheduled
11249                 # due to portage update.
11250                 if self._is_restart_scheduled() or \
11251                         self._opts_no_background.intersection(self.myopts):
11252                         self._set_max_jobs(1)
11253
11254                 merge_queue = self._task_queues.merge
11255
11256                 while self._schedule():
11257                         if self._poll_event_handlers:
11258                                 self._poll_loop()
11259
11260                 while True:
11261                         self._schedule()
11262                         if not (self._jobs or merge_queue):
11263                                 break
11264                         if self._poll_event_handlers:
11265                                 self._poll_loop()
11266
11267         def _keep_scheduling(self):
11268                 return bool(self._pkg_queue and \
11269                         not (self._failed_pkgs and not self._build_opts.fetchonly))
11270
11271         def _schedule_tasks(self):
11272
11273                 # When the number of jobs drops to zero, process all waiting merges.
11274                 if not self._jobs and self._merge_wait_queue:
11275                         for task in self._merge_wait_queue:
11276                                 task.addExitListener(self._merge_wait_exit_handler)
11277                                 self._task_queues.merge.add(task)
11278                         self._status_display.merges = len(self._task_queues.merge)
11279                         self._merge_wait_scheduled.extend(self._merge_wait_queue)
11280                         del self._merge_wait_queue[:]
11281
11282                 self._schedule_tasks_imp()
11283                 self._status_display.display()
11284
11285                 state_change = 0
11286                 for q in self._task_queues.values():
11287                         if q.schedule():
11288                                 state_change += 1
11289
11290                 # Cancel prefetchers if they're the only reason
11291                 # the main poll loop is still running.
11292                 if self._failed_pkgs and not self._build_opts.fetchonly and \
11293                         not (self._jobs or self._task_queues.merge) and \
11294                         self._task_queues.fetch:
11295                         self._task_queues.fetch.clear()
11296                         state_change += 1
11297
11298                 if state_change:
11299                         self._schedule_tasks_imp()
11300                         self._status_display.display()
11301
11302                 return self._keep_scheduling()
11303
11304         def _job_delay(self):
11305                 """
11306                 @rtype: bool
11307                 @returns: True if job scheduling should be delayed, False otherwise.
11308                 """
11309
11310                 if self._jobs and self._max_load is not None:
11311
11312                         current_time = time.time()
11313
11314                         delay = self._job_delay_factor * self._jobs ** self._job_delay_exp
11315                         if delay > self._job_delay_max:
11316                                 delay = self._job_delay_max
11317                         if (current_time - self._previous_job_start_time) < delay:
11318                                 return True
11319
11320                 return False
11321
11322         def _schedule_tasks_imp(self):
11323                 """
11324                 @rtype: bool
11325                 @returns: True if state changed, False otherwise.
11326                 """
11327
11328                 state_change = 0
11329
11330                 while True:
11331
11332                         if not self._keep_scheduling():
11333                                 return bool(state_change)
11334
11335                         if self._choose_pkg_return_early or \
11336                                 self._merge_wait_scheduled or \
11337                                 (self._jobs and self._unsatisfied_system_deps) or \
11338                                 not self._can_add_job() or \
11339                                 self._job_delay():
11340                                 return bool(state_change)
11341
11342                         pkg = self._choose_pkg()
11343                         if pkg is None:
11344                                 return bool(state_change)
11345
11346                         state_change += 1
11347
11348                         if not pkg.installed:
11349                                 self._pkg_count.curval += 1
11350
11351                         task = self._task(pkg)
11352
11353                         if pkg.installed:
11354                                 merge = PackageMerge(merge=task)
11355                                 merge.addExitListener(self._merge_exit)
11356                                 self._task_queues.merge.add(merge)
11357
11358                         elif pkg.built:
11359                                 self._jobs += 1
11360                                 self._previous_job_start_time = time.time()
11361                                 self._status_display.running = self._jobs
11362                                 task.addExitListener(self._extract_exit)
11363                                 self._task_queues.jobs.add(task)
11364
11365                         else:
11366                                 self._jobs += 1
11367                                 self._previous_job_start_time = time.time()
11368                                 self._status_display.running = self._jobs
11369                                 task.addExitListener(self._build_exit)
11370                                 self._task_queues.jobs.add(task)
11371
11372                 return bool(state_change)
11373
11374         def _task(self, pkg):
11375
11376                 pkg_to_replace = None
11377                 if pkg.operation != "uninstall":
11378                         vardb = pkg.root_config.trees["vartree"].dbapi
11379                         previous_cpv = vardb.match(pkg.slot_atom)
11380                         if previous_cpv:
11381                                 previous_cpv = previous_cpv.pop()
11382                                 pkg_to_replace = self._pkg(previous_cpv,
11383                                         "installed", pkg.root_config, installed=True)
11384
11385                 task = MergeListItem(args_set=self._args_set,
11386                         background=self._background, binpkg_opts=self._binpkg_opts,
11387                         build_opts=self._build_opts,
11388                         config_pool=self._ConfigPool(pkg.root,
11389                         self._allocate_config, self._deallocate_config),
11390                         emerge_opts=self.myopts,
11391                         find_blockers=self._find_blockers(pkg), logger=self._logger,
11392                         mtimedb=self._mtimedb, pkg=pkg, pkg_count=self._pkg_count.copy(),
11393                         pkg_to_replace=pkg_to_replace,
11394                         prefetcher=self._prefetchers.get(pkg),
11395                         scheduler=self._sched_iface,
11396                         settings=self._allocate_config(pkg.root),
11397                         statusMessage=self._status_msg,
11398                         world_atom=self._world_atom)
11399
11400                 return task
11401
11402         def _failed_pkg_msg(self, failed_pkg, action, preposition):
11403                 pkg = failed_pkg.pkg
11404                 msg = "%s to %s %s" % \
11405                         (bad("Failed"), action, colorize("INFORM", pkg.cpv))
11406                 if pkg.root != "/":
11407                         msg += " %s %s" % (preposition, pkg.root)
11408
11409                 log_path = self._locate_failure_log(failed_pkg)
11410                 if log_path is not None:
11411                         msg += ", Log file:"
11412                 self._status_msg(msg)
11413
11414                 if log_path is not None:
11415                         self._status_msg(" '%s'" % (colorize("INFORM", log_path),))
11416
11417         def _status_msg(self, msg):
11418                 """
11419                 Display a brief status message (no newlines) in the status display.
11420                 This is called by tasks to provide feedback to the user. This
11421                 delegates the resposibility of generating \r and \n control characters,
11422                 to guarantee that lines are created or erased when necessary and
11423                 appropriate.
11424
11425                 @type msg: str
11426                 @param msg: a brief status message (no newlines allowed)
11427                 """
11428                 if not self._background:
11429                         writemsg_level("\n")
11430                 self._status_display.displayMessage(msg)
11431
11432         def _save_resume_list(self):
11433                 """
11434                 Do this before verifying the ebuild Manifests since it might
11435                 be possible for the user to use --resume --skipfirst get past
11436                 a non-essential package with a broken digest.
11437                 """
11438                 mtimedb = self._mtimedb
11439                 mtimedb["resume"]["mergelist"] = [list(x) \
11440                         for x in self._mergelist \
11441                         if isinstance(x, Package) and x.operation == "merge"]
11442
11443                 mtimedb.commit()
11444
11445         def _calc_resume_list(self):
11446                 """
11447                 Use the current resume list to calculate a new one,
11448                 dropping any packages with unsatisfied deps.
11449                 @rtype: bool
11450                 @returns: True if successful, False otherwise.
11451                 """
11452                 print colorize("GOOD", "*** Resuming merge...")
11453
11454                 if self._show_list():
11455                         if "--tree" in self.myopts:
11456                                 portage.writemsg_stdout("\n" + \
11457                                         darkgreen("These are the packages that " + \
11458                                         "would be merged, in reverse order:\n\n"))
11459
11460                         else:
11461                                 portage.writemsg_stdout("\n" + \
11462                                         darkgreen("These are the packages that " + \
11463                                         "would be merged, in order:\n\n"))
11464
11465                 show_spinner = "--quiet" not in self.myopts and \
11466                         "--nodeps" not in self.myopts
11467
11468                 if show_spinner:
11469                         print "Calculating dependencies  ",
11470
11471                 myparams = create_depgraph_params(self.myopts, None)
11472                 success = False
11473                 e = None
11474                 try:
11475                         success, mydepgraph, dropped_tasks = resume_depgraph(
11476                                 self.settings, self.trees, self._mtimedb, self.myopts,
11477                                 myparams, self._spinner)
11478                 except depgraph.UnsatisfiedResumeDep, exc:
11479                         # rename variable to avoid python-3.0 error:
11480                         # SyntaxError: can not delete variable 'e' referenced in nested
11481                         #              scope
11482                         e = exc
11483                         mydepgraph = e.depgraph
11484                         dropped_tasks = set()
11485
11486                 if show_spinner:
11487                         print "\b\b... done!"
11488
11489                 if e is not None:
11490                         def unsatisfied_resume_dep_msg():
11491                                 mydepgraph.display_problems()
11492                                 out = portage.output.EOutput()
11493                                 out.eerror("One or more packages are either masked or " + \
11494                                         "have missing dependencies:")
11495                                 out.eerror("")
11496                                 indent = "  "
11497                                 show_parents = set()
11498                                 for dep in e.value:
11499                                         if dep.parent in show_parents:
11500                                                 continue
11501                                         show_parents.add(dep.parent)
11502                                         if dep.atom is None:
11503                                                 out.eerror(indent + "Masked package:")
11504                                                 out.eerror(2 * indent + str(dep.parent))
11505                                                 out.eerror("")
11506                                         else:
11507                                                 out.eerror(indent + str(dep.atom) + " pulled in by:")
11508                                                 out.eerror(2 * indent + str(dep.parent))
11509                                                 out.eerror("")
11510                                 msg = "The resume list contains packages " + \
11511                                         "that are either masked or have " + \
11512                                         "unsatisfied dependencies. " + \
11513                                         "Please restart/continue " + \
11514                                         "the operation manually, or use --skipfirst " + \
11515                                         "to skip the first package in the list and " + \
11516                                         "any other packages that may be " + \
11517                                         "masked or have missing dependencies."
11518                                 for line in textwrap.wrap(msg, 72):
11519                                         out.eerror(line)
11520                         self._post_mod_echo_msgs.append(unsatisfied_resume_dep_msg)
11521                         return False
11522
11523                 if success and self._show_list():
11524                         mylist = mydepgraph.altlist()
11525                         if mylist:
11526                                 if "--tree" in self.myopts:
11527                                         mylist.reverse()
11528                                 mydepgraph.display(mylist, favorites=self._favorites)
11529
11530                 if not success:
11531                         self._post_mod_echo_msgs.append(mydepgraph.display_problems)
11532                         return False
11533                 mydepgraph.display_problems()
11534
11535                 mylist = mydepgraph.altlist()
11536                 mydepgraph.break_refs(mylist)
11537                 mydepgraph.break_refs(dropped_tasks)
11538                 self._mergelist = mylist
11539                 self._set_digraph(mydepgraph.schedulerGraph())
11540
11541                 msg_width = 75
11542                 for task in dropped_tasks:
11543                         if not (isinstance(task, Package) and task.operation == "merge"):
11544                                 continue
11545                         pkg = task
11546                         msg = "emerge --keep-going:" + \
11547                                 " %s" % (pkg.cpv,)
11548                         if pkg.root != "/":
11549                                 msg += " for %s" % (pkg.root,)
11550                         msg += " dropped due to unsatisfied dependency."
11551                         for line in textwrap.wrap(msg, msg_width):
11552                                 eerror(line, phase="other", key=pkg.cpv)
11553                         settings = self.pkgsettings[pkg.root]
11554                         # Ensure that log collection from $T is disabled inside
11555                         # elog_process(), since any logs that might exist are
11556                         # not valid here.
11557                         settings.pop("T", None)
11558                         portage.elog.elog_process(pkg.cpv, settings)
11559                         self._failed_pkgs_all.append(self._failed_pkg(pkg=pkg))
11560
11561                 return True
11562
11563         def _show_list(self):
11564                 myopts = self.myopts
11565                 if "--quiet" not in myopts and \
11566                         ("--ask" in myopts or "--tree" in myopts or \
11567                         "--verbose" in myopts):
11568                         return True
11569                 return False
11570
11571         def _world_atom(self, pkg):
11572                 """
11573                 Add the package to the world file, but only if
11574                 it's supposed to be added. Otherwise, do nothing.
11575                 """
11576
11577                 if set(("--buildpkgonly", "--fetchonly",
11578                         "--fetch-all-uri",
11579                         "--oneshot", "--onlydeps",
11580                         "--pretend")).intersection(self.myopts):
11581                         return
11582
11583                 if pkg.root != self.target_root:
11584                         return
11585
11586                 args_set = self._args_set
11587                 if not args_set.findAtomForPackage(pkg):
11588                         return
11589
11590                 logger = self._logger
11591                 pkg_count = self._pkg_count
11592                 root_config = pkg.root_config
11593                 world_set = root_config.sets["world"]
11594                 world_locked = False
11595                 if hasattr(world_set, "lock"):
11596                         world_set.lock()
11597                         world_locked = True
11598
11599                 try:
11600                         if hasattr(world_set, "load"):
11601                                 world_set.load() # maybe it's changed on disk
11602
11603                         atom = create_world_atom(pkg, args_set, root_config)
11604                         if atom:
11605                                 if hasattr(world_set, "add"):
11606                                         self._status_msg(('Recording %s in "world" ' + \
11607                                                 'favorites file...') % atom)
11608                                         logger.log(" === (%s of %s) Updating world file (%s)" % \
11609                                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv))
11610                                         world_set.add(atom)
11611                                 else:
11612                                         writemsg_level('\n!!! Unable to record %s in "world"\n' % \
11613                                                 (atom,), level=logging.WARN, noiselevel=-1)
11614                 finally:
11615                         if world_locked:
11616                                 world_set.unlock()
11617
11618         def _pkg(self, cpv, type_name, root_config, installed=False):
11619                 """
11620                 Get a package instance from the cache, or create a new
11621                 one if necessary. Raises KeyError from aux_get if it
11622                 failures for some reason (package does not exist or is
11623                 corrupt).
11624                 """
11625                 operation = "merge"
11626                 if installed:
11627                         operation = "nomerge"
11628
11629                 if self._digraph is not None:
11630                         # Reuse existing instance when available.
11631                         pkg = self._digraph.get(
11632                                 (type_name, root_config.root, cpv, operation))
11633                         if pkg is not None:
11634                                 return pkg
11635
11636                 tree_type = depgraph.pkg_tree_map[type_name]
11637                 db = root_config.trees[tree_type].dbapi
11638                 db_keys = list(self.trees[root_config.root][
11639                         tree_type].dbapi._aux_cache_keys)
11640                 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
11641                 pkg = Package(cpv=cpv, metadata=metadata,
11642                         root_config=root_config, installed=installed)
11643                 if type_name == "ebuild":
11644                         settings = self.pkgsettings[root_config.root]
11645                         settings.setcpv(pkg)
11646                         pkg.metadata["USE"] = settings["PORTAGE_USE"]
11647                         pkg.metadata['CHOST'] = settings.get('CHOST', '')
11648
11649                 return pkg
11650
11651 class MetadataRegen(PollScheduler):
11652
11653         def __init__(self, portdb, cp_iter=None, consumer=None,
11654                 max_jobs=None, max_load=None):
11655                 PollScheduler.__init__(self)
11656                 self._portdb = portdb
11657                 self._global_cleanse = False
11658                 if cp_iter is None:
11659                         cp_iter = self._iter_every_cp()
11660                         # We can globally cleanse stale cache only if we
11661                         # iterate over every single cp.
11662                         self._global_cleanse = True
11663                 self._cp_iter = cp_iter
11664                 self._consumer = consumer
11665
11666                 if max_jobs is None:
11667                         max_jobs = 1
11668
11669                 self._max_jobs = max_jobs
11670                 self._max_load = max_load
11671                 self._sched_iface = self._sched_iface_class(
11672                         register=self._register,
11673                         schedule=self._schedule_wait,
11674                         unregister=self._unregister)
11675
11676                 self._valid_pkgs = set()
11677                 self._cp_set = set()
11678                 self._process_iter = self._iter_metadata_processes()
11679                 self.returncode = os.EX_OK
11680                 self._error_count = 0
11681
11682         def _iter_every_cp(self):
11683                 every_cp = self._portdb.cp_all()
11684                 every_cp.sort(reverse=True)
11685                 try:
11686                         while True:
11687                                 yield every_cp.pop()
11688                 except IndexError:
11689                         pass
11690
11691         def _iter_metadata_processes(self):
11692                 portdb = self._portdb
11693                 valid_pkgs = self._valid_pkgs
11694                 cp_set = self._cp_set
11695                 consumer = self._consumer
11696
11697                 for cp in self._cp_iter:
11698                         cp_set.add(cp)
11699                         portage.writemsg_stdout("Processing %s\n" % cp)
11700                         cpv_list = portdb.cp_list(cp)
11701                         for cpv in cpv_list:
11702                                 valid_pkgs.add(cpv)
11703                                 ebuild_path, repo_path = portdb.findname2(cpv)
11704                                 metadata, st, emtime = portdb._pull_valid_cache(
11705                                         cpv, ebuild_path, repo_path)
11706                                 if metadata is not None:
11707                                         if consumer is not None:
11708                                                 consumer(cpv, ebuild_path,
11709                                                         repo_path, metadata)
11710                                         continue
11711
11712                                 yield EbuildMetadataPhase(cpv=cpv, ebuild_path=ebuild_path,
11713                                         ebuild_mtime=emtime,
11714                                         metadata_callback=portdb._metadata_callback,
11715                                         portdb=portdb, repo_path=repo_path,
11716                                         settings=portdb.doebuild_settings)
11717
11718         def run(self):
11719
11720                 portdb = self._portdb
11721                 from portage.cache.cache_errors import CacheError
11722                 dead_nodes = {}
11723
11724                 while self._schedule():
11725                         self._poll_loop()
11726
11727                 while self._jobs:
11728                         self._poll_loop()
11729
11730                 if self._global_cleanse:
11731                         for mytree in portdb.porttrees:
11732                                 try:
11733                                         dead_nodes[mytree] = set(portdb.auxdb[mytree].iterkeys())
11734                                 except CacheError, e:
11735                                         portage.writemsg("Error listing cache entries for " + \
11736                                                 "'%s': %s, continuing...\n" % (mytree, e),
11737                                                 noiselevel=-1)
11738                                         del e
11739                                         dead_nodes = None
11740                                         break
11741                 else:
11742                         cp_set = self._cp_set
11743                         cpv_getkey = portage.cpv_getkey
11744                         for mytree in portdb.porttrees:
11745                                 try:
11746                                         dead_nodes[mytree] = set(cpv for cpv in \
11747                                                 portdb.auxdb[mytree].iterkeys() \
11748                                                 if cpv_getkey(cpv) in cp_set)
11749                                 except CacheError, e:
11750                                         portage.writemsg("Error listing cache entries for " + \
11751                                                 "'%s': %s, continuing...\n" % (mytree, e),
11752                                                 noiselevel=-1)
11753                                         del e
11754                                         dead_nodes = None
11755                                         break
11756
11757                 if dead_nodes:
11758                         for y in self._valid_pkgs:
11759                                 for mytree in portdb.porttrees:
11760                                         if portdb.findname2(y, mytree=mytree)[0]:
11761                                                 dead_nodes[mytree].discard(y)
11762
11763                         for mytree, nodes in dead_nodes.iteritems():
11764                                 auxdb = portdb.auxdb[mytree]
11765                                 for y in nodes:
11766                                         try:
11767                                                 del auxdb[y]
11768                                         except (KeyError, CacheError):
11769                                                 pass
11770
11771         def _schedule_tasks(self):
11772                 """
11773                 @rtype: bool
11774                 @returns: True if there may be remaining tasks to schedule,
11775                         False otherwise.
11776                 """
11777                 while self._can_add_job():
11778                         try:
11779                                 metadata_process = self._process_iter.next()
11780                         except StopIteration:
11781                                 return False
11782
11783                         self._jobs += 1
11784                         metadata_process.scheduler = self._sched_iface
11785                         metadata_process.addExitListener(self._metadata_exit)
11786                         metadata_process.start()
11787                 return True
11788
11789         def _metadata_exit(self, metadata_process):
11790                 self._jobs -= 1
11791                 if metadata_process.returncode != os.EX_OK:
11792                         self.returncode = 1
11793                         self._error_count += 1
11794                         self._valid_pkgs.discard(metadata_process.cpv)
11795                         portage.writemsg("Error processing %s, continuing...\n" % \
11796                                 (metadata_process.cpv,), noiselevel=-1)
11797
11798                 if self._consumer is not None:
11799                         # On failure, still notify the consumer (in this case the metadata
11800                         # argument is None).
11801                         self._consumer(metadata_process.cpv,
11802                                 metadata_process.ebuild_path,
11803                                 metadata_process.repo_path,
11804                                 metadata_process.metadata)
11805
11806                 self._schedule()
11807
11808 class UninstallFailure(portage.exception.PortageException):
11809         """
11810         An instance of this class is raised by unmerge() when
11811         an uninstallation fails.
11812         """
11813         status = 1
11814         def __init__(self, *pargs):
11815                 portage.exception.PortageException.__init__(self, pargs)
11816                 if pargs:
11817                         self.status = pargs[0]
11818
11819 def unmerge(root_config, myopts, unmerge_action,
11820         unmerge_files, ldpath_mtimes, autoclean=0,
11821         clean_world=1, clean_delay=1, ordered=0, raise_on_error=0,
11822         scheduler=None, writemsg_level=portage.util.writemsg_level):
11823
11824         quiet = "--quiet" in myopts
11825         settings = root_config.settings
11826         sets = root_config.sets
11827         vartree = root_config.trees["vartree"]
11828         candidate_catpkgs=[]
11829         global_unmerge=0
11830         xterm_titles = "notitles" not in settings.features
11831         out = portage.output.EOutput()
11832         pkg_cache = {}
11833         db_keys = list(vartree.dbapi._aux_cache_keys)
11834
11835         def _pkg(cpv):
11836                 pkg = pkg_cache.get(cpv)
11837                 if pkg is None:
11838                         pkg = Package(cpv=cpv, installed=True,
11839                                 metadata=izip(db_keys, vartree.dbapi.aux_get(cpv, db_keys)),
11840                                 root_config=root_config,
11841                                 type_name="installed")
11842                         pkg_cache[cpv] = pkg
11843                 return pkg
11844
11845         vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
11846         try:
11847                 # At least the parent needs to exist for the lock file.
11848                 portage.util.ensure_dirs(vdb_path)
11849         except portage.exception.PortageException:
11850                 pass
11851         vdb_lock = None
11852         try:
11853                 if os.access(vdb_path, os.W_OK):
11854                         vdb_lock = portage.locks.lockdir(vdb_path)
11855                 realsyslist = sets["system"].getAtoms()
11856                 syslist = []
11857                 for x in realsyslist:
11858                         mycp = portage.dep_getkey(x)
11859                         if mycp in settings.getvirtuals():
11860                                 providers = []
11861                                 for provider in settings.getvirtuals()[mycp]:
11862                                         if vartree.dbapi.match(provider):
11863                                                 providers.append(provider)
11864                                 if len(providers) == 1:
11865                                         syslist.extend(providers)
11866                         else:
11867                                 syslist.append(mycp)
11868         
11869                 mysettings = portage.config(clone=settings)
11870         
11871                 if not unmerge_files:
11872                         if unmerge_action == "unmerge":
11873                                 print
11874                                 print bold("emerge unmerge") + " can only be used with specific package names"
11875                                 print
11876                                 return 0
11877                         else:
11878                                 global_unmerge = 1
11879         
11880                 localtree = vartree
11881                 # process all arguments and add all
11882                 # valid db entries to candidate_catpkgs
11883                 if global_unmerge:
11884                         if not unmerge_files:
11885                                 candidate_catpkgs.extend(vartree.dbapi.cp_all())
11886                 else:
11887                         #we've got command-line arguments
11888                         if not unmerge_files:
11889                                 print "\nNo packages to unmerge have been provided.\n"
11890                                 return 0
11891                         for x in unmerge_files:
11892                                 arg_parts = x.split('/')
11893                                 if x[0] not in [".","/"] and \
11894                                         arg_parts[-1][-7:] != ".ebuild":
11895                                         #possible cat/pkg or dep; treat as such
11896                                         candidate_catpkgs.append(x)
11897                                 elif unmerge_action in ["prune","clean"]:
11898                                         print "\n!!! Prune and clean do not accept individual" + \
11899                                                 " ebuilds as arguments;\n    skipping.\n"
11900                                         continue
11901                                 else:
11902                                         # it appears that the user is specifying an installed
11903                                         # ebuild and we're in "unmerge" mode, so it's ok.
11904                                         if not os.path.exists(x):
11905                                                 print "\n!!! The path '"+x+"' doesn't exist.\n"
11906                                                 return 0
11907         
11908                                         absx   = os.path.abspath(x)
11909                                         sp_absx = absx.split("/")
11910                                         if sp_absx[-1][-7:] == ".ebuild":
11911                                                 del sp_absx[-1]
11912                                                 absx = "/".join(sp_absx)
11913         
11914                                         sp_absx_len = len(sp_absx)
11915         
11916                                         vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
11917                                         vdb_len  = len(vdb_path)
11918         
11919                                         sp_vdb     = vdb_path.split("/")
11920                                         sp_vdb_len = len(sp_vdb)
11921         
11922                                         if not os.path.exists(absx+"/CONTENTS"):
11923                                                 print "!!! Not a valid db dir: "+str(absx)
11924                                                 return 0
11925         
11926                                         if sp_absx_len <= sp_vdb_len:
11927                                                 # The Path is shorter... so it can't be inside the vdb.
11928                                                 print sp_absx
11929                                                 print absx
11930                                                 print "\n!!!",x,"cannot be inside "+ \
11931                                                         vdb_path+"; aborting.\n"
11932                                                 return 0
11933         
11934                                         for idx in range(0,sp_vdb_len):
11935                                                 if idx >= sp_absx_len or sp_vdb[idx] != sp_absx[idx]:
11936                                                         print sp_absx
11937                                                         print absx
11938                                                         print "\n!!!", x, "is not inside "+\
11939                                                                 vdb_path+"; aborting.\n"
11940                                                         return 0
11941         
11942                                         print "="+"/".join(sp_absx[sp_vdb_len:])
11943                                         candidate_catpkgs.append(
11944                                                 "="+"/".join(sp_absx[sp_vdb_len:]))
11945         
11946                 newline=""
11947                 if (not "--quiet" in myopts):
11948                         newline="\n"
11949                 if settings["ROOT"] != "/":
11950                         writemsg_level(darkgreen(newline+ \
11951                                 ">>> Using system located in ROOT tree %s\n" % \
11952                                 settings["ROOT"]))
11953
11954                 if (("--pretend" in myopts) or ("--ask" in myopts)) and \
11955                         not ("--quiet" in myopts):
11956                         writemsg_level(darkgreen(newline+\
11957                                 ">>> These are the packages that would be unmerged:\n"))
11958
11959                 # Preservation of order is required for --depclean and --prune so
11960                 # that dependencies are respected. Use all_selected to eliminate
11961                 # duplicate packages since the same package may be selected by
11962                 # multiple atoms.
11963                 pkgmap = []
11964                 all_selected = set()
11965                 for x in candidate_catpkgs:
11966                         # cycle through all our candidate deps and determine
11967                         # what will and will not get unmerged
11968                         try:
11969                                 mymatch = vartree.dbapi.match(x)
11970                         except portage.exception.AmbiguousPackageName, errpkgs:
11971                                 print "\n\n!!! The short ebuild name \"" + \
11972                                         x + "\" is ambiguous.  Please specify"
11973                                 print "!!! one of the following fully-qualified " + \
11974                                         "ebuild names instead:\n"
11975                                 for i in errpkgs[0]:
11976                                         print "    " + green(i)
11977                                 print
11978                                 sys.exit(1)
11979         
11980                         if not mymatch and x[0] not in "<>=~":
11981                                 mymatch = localtree.dep_match(x)
11982                         if not mymatch:
11983                                 portage.writemsg("\n--- Couldn't find '%s' to %s.\n" % \
11984                                         (x, unmerge_action), noiselevel=-1)
11985                                 continue
11986
11987                         pkgmap.append(
11988                                 {"protected": set(), "selected": set(), "omitted": set()})
11989                         mykey = len(pkgmap) - 1
11990                         if unmerge_action=="unmerge":
11991                                         for y in mymatch:
11992                                                 if y not in all_selected:
11993                                                         pkgmap[mykey]["selected"].add(y)
11994                                                         all_selected.add(y)
11995                         elif unmerge_action == "prune":
11996                                 if len(mymatch) == 1:
11997                                         continue
11998                                 best_version = mymatch[0]
11999                                 best_slot = vartree.getslot(best_version)
12000                                 best_counter = vartree.dbapi.cpv_counter(best_version)
12001                                 for mypkg in mymatch[1:]:
12002                                         myslot = vartree.getslot(mypkg)
12003                                         mycounter = vartree.dbapi.cpv_counter(mypkg)
12004                                         if (myslot == best_slot and mycounter > best_counter) or \
12005                                                 mypkg == portage.best([mypkg, best_version]):
12006                                                 if myslot == best_slot:
12007                                                         if mycounter < best_counter:
12008                                                                 # On slot collision, keep the one with the
12009                                                                 # highest counter since it is the most
12010                                                                 # recently installed.
12011                                                                 continue
12012                                                 best_version = mypkg
12013                                                 best_slot = myslot
12014                                                 best_counter = mycounter
12015                                 pkgmap[mykey]["protected"].add(best_version)
12016                                 pkgmap[mykey]["selected"].update(mypkg for mypkg in mymatch \
12017                                         if mypkg != best_version and mypkg not in all_selected)
12018                                 all_selected.update(pkgmap[mykey]["selected"])
12019                         else:
12020                                 # unmerge_action == "clean"
12021                                 slotmap={}
12022                                 for mypkg in mymatch:
12023                                         if unmerge_action == "clean":
12024                                                 myslot = localtree.getslot(mypkg)
12025                                         else:
12026                                                 # since we're pruning, we don't care about slots
12027                                                 # and put all the pkgs in together
12028                                                 myslot = 0
12029                                         if myslot not in slotmap:
12030                                                 slotmap[myslot] = {}
12031                                         slotmap[myslot][localtree.dbapi.cpv_counter(mypkg)] = mypkg
12032
12033                                 for mypkg in vartree.dbapi.cp_list(
12034                                         portage.dep_getkey(mymatch[0])):
12035                                         myslot = vartree.getslot(mypkg)
12036                                         if myslot not in slotmap:
12037                                                 slotmap[myslot] = {}
12038                                         slotmap[myslot][vartree.dbapi.cpv_counter(mypkg)] = mypkg
12039
12040                                 for myslot in slotmap:
12041                                         counterkeys = slotmap[myslot].keys()
12042                                         if not counterkeys:
12043                                                 continue
12044                                         counterkeys.sort()
12045                                         pkgmap[mykey]["protected"].add(
12046                                                 slotmap[myslot][counterkeys[-1]])
12047                                         del counterkeys[-1]
12048
12049                                         for counter in counterkeys[:]:
12050                                                 mypkg = slotmap[myslot][counter]
12051                                                 if mypkg not in mymatch:
12052                                                         counterkeys.remove(counter)
12053                                                         pkgmap[mykey]["protected"].add(
12054                                                                 slotmap[myslot][counter])
12055
12056                                         #be pretty and get them in order of merge:
12057                                         for ckey in counterkeys:
12058                                                 mypkg = slotmap[myslot][ckey]
12059                                                 if mypkg not in all_selected:
12060                                                         pkgmap[mykey]["selected"].add(mypkg)
12061                                                         all_selected.add(mypkg)
12062                                         # ok, now the last-merged package
12063                                         # is protected, and the rest are selected
12064                 numselected = len(all_selected)
12065                 if global_unmerge and not numselected:
12066                         portage.writemsg_stdout("\n>>> No outdated packages were found on your system.\n")
12067                         return 0
12068         
12069                 if not numselected:
12070                         portage.writemsg_stdout(
12071                                 "\n>>> No packages selected for removal by " + \
12072                                 unmerge_action + "\n")
12073                         return 0
12074         finally:
12075                 if vdb_lock:
12076                         vartree.dbapi.flush_cache()
12077                         portage.locks.unlockdir(vdb_lock)
12078         
12079         from portage.sets.base import EditablePackageSet
12080         
12081         # generate a list of package sets that are directly or indirectly listed in "world",
12082         # as there is no persistent list of "installed" sets
12083         installed_sets = ["world"]
12084         stop = False
12085         pos = 0
12086         while not stop:
12087                 stop = True
12088                 pos = len(installed_sets)
12089                 for s in installed_sets[pos - 1:]:
12090                         if s not in sets:
12091                                 continue
12092                         candidates = [x[len(SETPREFIX):] for x in sets[s].getNonAtoms() if x.startswith(SETPREFIX)]
12093                         if candidates:
12094                                 stop = False
12095                                 installed_sets += candidates
12096         installed_sets = [x for x in installed_sets if x not in root_config.setconfig.active]
12097         del stop, pos
12098
12099         # we don't want to unmerge packages that are still listed in user-editable package sets
12100         # listed in "world" as they would be remerged on the next update of "world" or the 
12101         # relevant package sets.
12102         unknown_sets = set()
12103         for cp in xrange(len(pkgmap)):
12104                 for cpv in pkgmap[cp]["selected"].copy():
12105                         try:
12106                                 pkg = _pkg(cpv)
12107                         except KeyError:
12108                                 # It could have been uninstalled
12109                                 # by a concurrent process.
12110                                 continue
12111
12112                         if unmerge_action != "clean" and \
12113                                 root_config.root == "/" and \
12114                                 portage.match_from_list(
12115                                 portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
12116                                 msg = ("Not unmerging package %s since there is no valid " + \
12117                                 "reason for portage to unmerge itself.") % (pkg.cpv,)
12118                                 for line in textwrap.wrap(msg, 75):
12119                                         out.eerror(line)
12120                                 # adjust pkgmap so the display output is correct
12121                                 pkgmap[cp]["selected"].remove(cpv)
12122                                 all_selected.remove(cpv)
12123                                 pkgmap[cp]["protected"].add(cpv)
12124                                 continue
12125
12126                         parents = []
12127                         for s in installed_sets:
12128                                 # skip sets that the user requested to unmerge, and skip world 
12129                                 # unless we're unmerging a package set (as the package would be 
12130                                 # removed from "world" later on)
12131                                 if s in root_config.setconfig.active or (s == "world" and not root_config.setconfig.active):
12132                                         continue
12133
12134                                 if s not in sets:
12135                                         if s in unknown_sets:
12136                                                 continue
12137                                         unknown_sets.add(s)
12138                                         out = portage.output.EOutput()
12139                                         out.eerror(("Unknown set '@%s' in " + \
12140                                                 "%svar/lib/portage/world_sets") % \
12141                                                 (s, root_config.root))
12142                                         continue
12143
12144                                 # only check instances of EditablePackageSet as other classes are generally used for
12145                                 # special purposes and can be ignored here (and are usually generated dynamically, so the
12146                                 # user can't do much about them anyway)
12147                                 if isinstance(sets[s], EditablePackageSet):
12148
12149                                         # This is derived from a snippet of code in the
12150                                         # depgraph._iter_atoms_for_pkg() method.
12151                                         for atom in sets[s].iterAtomsForPackage(pkg):
12152                                                 inst_matches = vartree.dbapi.match(atom)
12153                                                 inst_matches.reverse() # descending order
12154                                                 higher_slot = None
12155                                                 for inst_cpv in inst_matches:
12156                                                         try:
12157                                                                 inst_pkg = _pkg(inst_cpv)
12158                                                         except KeyError:
12159                                                                 # It could have been uninstalled
12160                                                                 # by a concurrent process.
12161                                                                 continue
12162
12163                                                         if inst_pkg.cp != atom.cp:
12164                                                                 continue
12165                                                         if pkg >= inst_pkg:
12166                                                                 # This is descending order, and we're not
12167                                                                 # interested in any versions <= pkg given.
12168                                                                 break
12169                                                         if pkg.slot_atom != inst_pkg.slot_atom:
12170                                                                 higher_slot = inst_pkg
12171                                                                 break
12172                                                 if higher_slot is None:
12173                                                         parents.append(s)
12174                                                         break
12175                         if parents:
12176                                 #print colorize("WARN", "Package %s is going to be unmerged," % cpv)
12177                                 #print colorize("WARN", "but still listed in the following package sets:")
12178                                 #print "    %s\n" % ", ".join(parents)
12179                                 print colorize("WARN", "Not unmerging package %s as it is" % cpv)
12180                                 print colorize("WARN", "still referenced by the following package sets:")
12181                                 print "    %s\n" % ", ".join(parents)
12182                                 # adjust pkgmap so the display output is correct
12183                                 pkgmap[cp]["selected"].remove(cpv)
12184                                 all_selected.remove(cpv)
12185                                 pkgmap[cp]["protected"].add(cpv)
12186         
12187         del installed_sets
12188
12189         numselected = len(all_selected)
12190         if not numselected:
12191                 writemsg_level(
12192                         "\n>>> No packages selected for removal by " + \
12193                         unmerge_action + "\n")
12194                 return 0
12195
12196         # Unmerge order only matters in some cases
12197         if not ordered:
12198                 unordered = {}
12199                 for d in pkgmap:
12200                         selected = d["selected"]
12201                         if not selected:
12202                                 continue
12203                         cp = portage.cpv_getkey(iter(selected).next())
12204                         cp_dict = unordered.get(cp)
12205                         if cp_dict is None:
12206                                 cp_dict = {}
12207                                 unordered[cp] = cp_dict
12208                                 for k in d:
12209                                         cp_dict[k] = set()
12210                         for k, v in d.iteritems():
12211                                 cp_dict[k].update(v)
12212                 pkgmap = [unordered[cp] for cp in sorted(unordered)]
12213
12214         for x in xrange(len(pkgmap)):
12215                 selected = pkgmap[x]["selected"]
12216                 if not selected:
12217                         continue
12218                 for mytype, mylist in pkgmap[x].iteritems():
12219                         if mytype == "selected":
12220                                 continue
12221                         mylist.difference_update(all_selected)
12222                 cp = portage.cpv_getkey(iter(selected).next())
12223                 for y in localtree.dep_match(cp):
12224                         if y not in pkgmap[x]["omitted"] and \
12225                                 y not in pkgmap[x]["selected"] and \
12226                                 y not in pkgmap[x]["protected"] and \
12227                                 y not in all_selected:
12228                                 pkgmap[x]["omitted"].add(y)
12229                 if global_unmerge and not pkgmap[x]["selected"]:
12230                         #avoid cluttering the preview printout with stuff that isn't getting unmerged
12231                         continue
12232                 if not (pkgmap[x]["protected"] or pkgmap[x]["omitted"]) and cp in syslist:
12233                         writemsg_level(colorize("BAD","\a\n\n!!! " + \
12234                                 "'%s' is part of your system profile.\n" % cp),
12235                                 level=logging.WARNING, noiselevel=-1)
12236                         writemsg_level(colorize("WARN","\a!!! Unmerging it may " + \
12237                                 "be damaging to your system.\n\n"),
12238                                 level=logging.WARNING, noiselevel=-1)
12239                         if clean_delay and "--pretend" not in myopts and "--ask" not in myopts:
12240                                 countdown(int(settings["EMERGE_WARNING_DELAY"]),
12241                                         colorize("UNMERGE_WARN", "Press Ctrl-C to Stop"))
12242                 if not quiet:
12243                         writemsg_level("\n %s\n" % (bold(cp),), noiselevel=-1)
12244                 else:
12245                         writemsg_level(bold(cp) + ": ", noiselevel=-1)
12246                 for mytype in ["selected","protected","omitted"]:
12247                         if not quiet:
12248                                 writemsg_level((mytype + ": ").rjust(14), noiselevel=-1)
12249                         if pkgmap[x][mytype]:
12250                                 sorted_pkgs = [portage.catpkgsplit(mypkg)[1:] for mypkg in pkgmap[x][mytype]]
12251                                 sorted_pkgs.sort(key=cmp_sort_key(portage.pkgcmp))
12252                                 for pn, ver, rev in sorted_pkgs:
12253                                         if rev == "r0":
12254                                                 myversion = ver
12255                                         else:
12256                                                 myversion = ver + "-" + rev
12257                                         if mytype == "selected":
12258                                                 writemsg_level(
12259                                                         colorize("UNMERGE_WARN", myversion + " "),
12260                                                         noiselevel=-1)
12261                                         else:
12262                                                 writemsg_level(
12263                                                         colorize("GOOD", myversion + " "), noiselevel=-1)
12264                         else:
12265                                 writemsg_level("none ", noiselevel=-1)
12266                         if not quiet:
12267                                 writemsg_level("\n", noiselevel=-1)
12268                 if quiet:
12269                         writemsg_level("\n", noiselevel=-1)
12270
12271         writemsg_level("\n>>> " + colorize("UNMERGE_WARN", "'Selected'") + \
12272                 " packages are slated for removal.\n")
12273         writemsg_level(">>> " + colorize("GOOD", "'Protected'") + \
12274                         " and " + colorize("GOOD", "'omitted'") + \
12275                         " packages will not be removed.\n\n")
12276
12277         if "--pretend" in myopts:
12278                 #we're done... return
12279                 return 0
12280         if "--ask" in myopts:
12281                 if userquery("Would you like to unmerge these packages?")=="No":
12282                         # enter pretend mode for correct formatting of results
12283                         myopts["--pretend"] = True
12284                         print
12285                         print "Quitting."
12286                         print
12287                         return 0
12288         #the real unmerging begins, after a short delay....
12289         if clean_delay and not autoclean:
12290                 countdown(int(settings["CLEAN_DELAY"]), ">>> Unmerging")
12291
12292         for x in xrange(len(pkgmap)):
12293                 for y in pkgmap[x]["selected"]:
12294                         writemsg_level(">>> Unmerging "+y+"...\n", noiselevel=-1)
12295                         emergelog(xterm_titles, "=== Unmerging... ("+y+")")
12296                         mysplit = y.split("/")
12297                         #unmerge...
12298                         retval = portage.unmerge(mysplit[0], mysplit[1], settings["ROOT"],
12299                                 mysettings, unmerge_action not in ["clean","prune"],
12300                                 vartree=vartree, ldpath_mtimes=ldpath_mtimes,
12301                                 scheduler=scheduler)
12302
12303                         if retval != os.EX_OK:
12304                                 emergelog(xterm_titles, " !!! unmerge FAILURE: "+y)
12305                                 if raise_on_error:
12306                                         raise UninstallFailure(retval)
12307                                 sys.exit(retval)
12308                         else:
12309                                 if clean_world and hasattr(sets["world"], "cleanPackage"):
12310                                         sets["world"].cleanPackage(vartree.dbapi, y)
12311                                 emergelog(xterm_titles, " >>> unmerge success: "+y)
12312         if clean_world and hasattr(sets["world"], "remove"):
12313                 for s in root_config.setconfig.active:
12314                         sets["world"].remove(SETPREFIX+s)
12315         return 1
12316
12317 def chk_updated_info_files(root, infodirs, prev_mtimes, retval):
12318
12319         if os.path.exists("/usr/bin/install-info"):
12320                 out = portage.output.EOutput()
12321                 regen_infodirs=[]
12322                 for z in infodirs:
12323                         if z=='':
12324                                 continue
12325                         inforoot=normpath(root+z)
12326                         if os.path.isdir(inforoot):
12327                                 infomtime = long(os.stat(inforoot).st_mtime)
12328                                 if inforoot not in prev_mtimes or \
12329                                         prev_mtimes[inforoot] != infomtime:
12330                                                 regen_infodirs.append(inforoot)
12331
12332                 if not regen_infodirs:
12333                         portage.writemsg_stdout("\n")
12334                         out.einfo("GNU info directory index is up-to-date.")
12335                 else:
12336                         portage.writemsg_stdout("\n")
12337                         out.einfo("Regenerating GNU info directory index...")
12338
12339                         dir_extensions = ("", ".gz", ".bz2")
12340                         icount=0
12341                         badcount=0
12342                         errmsg = ""
12343                         for inforoot in regen_infodirs:
12344                                 if inforoot=='':
12345                                         continue
12346
12347                                 if not os.path.isdir(inforoot) or \
12348                                         not os.access(inforoot, os.W_OK):
12349                                         continue
12350
12351                                 file_list = os.listdir(inforoot)
12352                                 file_list.sort()
12353                                 dir_file = os.path.join(inforoot, "dir")
12354                                 moved_old_dir = False
12355                                 processed_count = 0
12356                                 for x in file_list:
12357                                         if x.startswith(".") or \
12358                                                 os.path.isdir(os.path.join(inforoot, x)):
12359                                                 continue
12360                                         if x.startswith("dir"):
12361                                                 skip = False
12362                                                 for ext in dir_extensions:
12363                                                         if x == "dir" + ext or \
12364                                                                 x == "dir" + ext + ".old":
12365                                                                 skip = True
12366                                                                 break
12367                                                 if skip:
12368                                                         continue
12369                                         if processed_count == 0:
12370                                                 for ext in dir_extensions:
12371                                                         try:
12372                                                                 os.rename(dir_file + ext, dir_file + ext + ".old")
12373                                                                 moved_old_dir = True
12374                                                         except EnvironmentError, e:
12375                                                                 if e.errno != errno.ENOENT:
12376                                                                         raise
12377                                                                 del e
12378                                         processed_count += 1
12379                                         myso=commands.getstatusoutput("LANG=C LANGUAGE=C /usr/bin/install-info --dir-file="+inforoot+"/dir "+inforoot+"/"+x)[1]
12380                                         existsstr="already exists, for file `"
12381                                         if myso!="":
12382                                                 if re.search(existsstr,myso):
12383                                                         # Already exists... Don't increment the count for this.
12384                                                         pass
12385                                                 elif myso[:44]=="install-info: warning: no info dir entry in ":
12386                                                         # This info file doesn't contain a DIR-header: install-info produces this
12387                                                         # (harmless) warning (the --quiet switch doesn't seem to work).
12388                                                         # Don't increment the count for this.
12389                                                         pass
12390                                                 else:
12391                                                         badcount=badcount+1
12392                                                         errmsg += myso + "\n"
12393                                         icount=icount+1
12394
12395                                 if moved_old_dir and not os.path.exists(dir_file):
12396                                         # We didn't generate a new dir file, so put the old file
12397                                         # back where it was originally found.
12398                                         for ext in dir_extensions:
12399                                                 try:
12400                                                         os.rename(dir_file + ext + ".old", dir_file + ext)
12401                                                 except EnvironmentError, e:
12402                                                         if e.errno != errno.ENOENT:
12403                                                                 raise
12404                                                         del e
12405
12406                                 # Clean dir.old cruft so that they don't prevent
12407                                 # unmerge of otherwise empty directories.
12408                                 for ext in dir_extensions:
12409                                         try:
12410                                                 os.unlink(dir_file + ext + ".old")
12411                                         except EnvironmentError, e:
12412                                                 if e.errno != errno.ENOENT:
12413                                                         raise
12414                                                 del e
12415
12416                                 #update mtime so we can potentially avoid regenerating.
12417                                 prev_mtimes[inforoot] = long(os.stat(inforoot).st_mtime)
12418
12419                         if badcount:
12420                                 out.eerror("Processed %d info files; %d errors." % \
12421                                         (icount, badcount))
12422                                 writemsg_level(errmsg, level=logging.ERROR, noiselevel=-1)
12423                         else:
12424                                 if icount > 0:
12425                                         out.einfo("Processed %d info files." % (icount,))
12426
12427
12428 def display_news_notification(root_config, myopts):
12429         target_root = root_config.root
12430         trees = root_config.trees
12431         settings = trees["vartree"].settings
12432         portdb = trees["porttree"].dbapi
12433         vardb = trees["vartree"].dbapi
12434         NEWS_PATH = os.path.join("metadata", "news")
12435         UNREAD_PATH = os.path.join(target_root, NEWS_LIB_PATH, "news")
12436         newsReaderDisplay = False
12437         update = "--pretend" not in myopts
12438
12439         for repo in portdb.getRepositories():
12440                 unreadItems = checkUpdatedNewsItems(
12441                         portdb, vardb, NEWS_PATH, UNREAD_PATH, repo, update=update)
12442                 if unreadItems:
12443                         if not newsReaderDisplay:
12444                                 newsReaderDisplay = True
12445                                 print
12446                         print colorize("WARN", " * IMPORTANT:"),
12447                         print "%s news items need reading for repository '%s'." % (unreadItems, repo)
12448                         
12449         
12450         if newsReaderDisplay:
12451                 print colorize("WARN", " *"),
12452                 print "Use " + colorize("GOOD", "eselect news") + " to read news items."
12453                 print
12454
12455 def display_preserved_libs(vardbapi):
12456         MAX_DISPLAY = 3
12457
12458         # Ensure the registry is consistent with existing files.
12459         vardbapi.plib_registry.pruneNonExisting()
12460
12461         if vardbapi.plib_registry.hasEntries():
12462                 print
12463                 print colorize("WARN", "!!!") + " existing preserved libs:"
12464                 plibdata = vardbapi.plib_registry.getPreservedLibs()
12465                 linkmap = vardbapi.linkmap
12466                 consumer_map = {}
12467                 owners = {}
12468                 linkmap_broken = False
12469
12470                 try:
12471                         linkmap.rebuild()
12472                 except portage.exception.CommandNotFound, e:
12473                         writemsg_level("!!! Command Not Found: %s\n" % (e,),
12474                                 level=logging.ERROR, noiselevel=-1)
12475                         del e
12476                         linkmap_broken = True
12477                 else:
12478                         search_for_owners = set()
12479                         for cpv in plibdata:
12480                                 internal_plib_keys = set(linkmap._obj_key(f) \
12481                                         for f in plibdata[cpv])
12482                                 for f in plibdata[cpv]:
12483                                         if f in consumer_map:
12484                                                 continue
12485                                         consumers = []
12486                                         for c in linkmap.findConsumers(f):
12487                                                 # Filter out any consumers that are also preserved libs
12488                                                 # belonging to the same package as the provider.
12489                                                 if linkmap._obj_key(c) not in internal_plib_keys:
12490                                                         consumers.append(c)
12491                                         consumers.sort()
12492                                         consumer_map[f] = consumers
12493                                         search_for_owners.update(consumers[:MAX_DISPLAY+1])
12494
12495                         owners = vardbapi._owners.getFileOwnerMap(search_for_owners)
12496
12497                 for cpv in plibdata:
12498                         print colorize("WARN", ">>>") + " package: %s" % cpv
12499                         samefile_map = {}
12500                         for f in plibdata[cpv]:
12501                                 obj_key = linkmap._obj_key(f)
12502                                 alt_paths = samefile_map.get(obj_key)
12503                                 if alt_paths is None:
12504                                         alt_paths = set()
12505                                         samefile_map[obj_key] = alt_paths
12506                                 alt_paths.add(f)
12507
12508                         for alt_paths in samefile_map.itervalues():
12509                                 alt_paths = sorted(alt_paths)
12510                                 for p in alt_paths:
12511                                         print colorize("WARN", " * ") + " - %s" % (p,)
12512                                 f = alt_paths[0]
12513                                 consumers = consumer_map.get(f, [])
12514                                 for c in consumers[:MAX_DISPLAY]:
12515                                         print colorize("WARN", " * ") + "     used by %s (%s)" % \
12516                                                 (c, ", ".join(x.mycpv for x in owners.get(c, [])))
12517                                 if len(consumers) == MAX_DISPLAY + 1:
12518                                         print colorize("WARN", " * ") + "     used by %s (%s)" % \
12519                                                 (consumers[MAX_DISPLAY], ", ".join(x.mycpv \
12520                                                 for x in owners.get(consumers[MAX_DISPLAY], [])))
12521                                 elif len(consumers) > MAX_DISPLAY:
12522                                         print colorize("WARN", " * ") + "     used by %d other files" % (len(consumers) - MAX_DISPLAY)
12523                 print "Use " + colorize("GOOD", "emerge @preserved-rebuild") + " to rebuild packages using these libraries"
12524
12525
12526 def _flush_elog_mod_echo():
12527         """
12528         Dump the mod_echo output now so that our other
12529         notifications are shown last.
12530         @rtype: bool
12531         @returns: True if messages were shown, False otherwise.
12532         """
12533         messages_shown = False
12534         try:
12535                 from portage.elog import mod_echo
12536         except ImportError:
12537                 pass # happens during downgrade to a version without the module
12538         else:
12539                 messages_shown = bool(mod_echo._items)
12540                 mod_echo.finalize()
12541         return messages_shown
12542
12543 def post_emerge(root_config, myopts, mtimedb, retval):
12544         """
12545         Misc. things to run at the end of a merge session.
12546         
12547         Update Info Files
12548         Update Config Files
12549         Update News Items
12550         Commit mtimeDB
12551         Display preserved libs warnings
12552         Exit Emerge
12553
12554         @param trees: A dictionary mapping each ROOT to it's package databases
12555         @type trees: dict
12556         @param mtimedb: The mtimeDB to store data needed across merge invocations
12557         @type mtimedb: MtimeDB class instance
12558         @param retval: Emerge's return value
12559         @type retval: Int
12560         @rype: None
12561         @returns:
12562         1.  Calls sys.exit(retval)
12563         """
12564
12565         target_root = root_config.root
12566         trees = { target_root : root_config.trees }
12567         vardbapi = trees[target_root]["vartree"].dbapi
12568         settings = vardbapi.settings
12569         info_mtimes = mtimedb["info"]
12570
12571         # Load the most current variables from ${ROOT}/etc/profile.env
12572         settings.unlock()
12573         settings.reload()
12574         settings.regenerate()
12575         settings.lock()
12576
12577         config_protect = settings.get("CONFIG_PROTECT","").split()
12578         infodirs = settings.get("INFOPATH","").split(":") + \
12579                 settings.get("INFODIR","").split(":")
12580
12581         os.chdir("/")
12582
12583         if retval == os.EX_OK:
12584                 exit_msg = " *** exiting successfully."
12585         else:
12586                 exit_msg = " *** exiting unsuccessfully with status '%s'." % retval
12587         emergelog("notitles" not in settings.features, exit_msg)
12588
12589         _flush_elog_mod_echo()
12590
12591         counter_hash = settings.get("PORTAGE_COUNTER_HASH")
12592         if "--pretend" in myopts or (counter_hash is not None and \
12593                 counter_hash == vardbapi._counter_hash()):
12594                 display_news_notification(root_config, myopts)
12595                 # If vdb state has not changed then there's nothing else to do.
12596                 sys.exit(retval)
12597
12598         vdb_path = os.path.join(target_root, portage.VDB_PATH)
12599         portage.util.ensure_dirs(vdb_path)
12600         vdb_lock = None
12601         if os.access(vdb_path, os.W_OK) and not "--pretend" in myopts:
12602                 vdb_lock = portage.locks.lockdir(vdb_path)
12603
12604         if vdb_lock:
12605                 try:
12606                         if "noinfo" not in settings.features:
12607                                 chk_updated_info_files(target_root,
12608                                         infodirs, info_mtimes, retval)
12609                         mtimedb.commit()
12610                 finally:
12611                         if vdb_lock:
12612                                 portage.locks.unlockdir(vdb_lock)
12613
12614         chk_updated_cfg_files(target_root, config_protect)
12615         
12616         display_news_notification(root_config, myopts)
12617         if retval in (None, os.EX_OK) or (not "--pretend" in myopts):
12618                 display_preserved_libs(vardbapi)        
12619
12620         sys.exit(retval)
12621
12622
12623 def chk_updated_cfg_files(target_root, config_protect):
12624         if config_protect:
12625                 #number of directories with some protect files in them
12626                 procount=0
12627                 for x in config_protect:
12628                         x = os.path.join(target_root, x.lstrip(os.path.sep))
12629                         if not os.access(x, os.W_OK):
12630                                 # Avoid Permission denied errors generated
12631                                 # later by `find`.
12632                                 continue
12633                         try:
12634                                 mymode = os.lstat(x).st_mode
12635                         except OSError:
12636                                 continue
12637                         if stat.S_ISLNK(mymode):
12638                                 # We want to treat it like a directory if it
12639                                 # is a symlink to an existing directory.
12640                                 try:
12641                                         real_mode = os.stat(x).st_mode
12642                                         if stat.S_ISDIR(real_mode):
12643                                                 mymode = real_mode
12644                                 except OSError:
12645                                         pass
12646                         if stat.S_ISDIR(mymode):
12647                                 mycommand = "find '%s' -name '.*' -type d -prune -o -name '._cfg????_*'" % x
12648                         else:
12649                                 mycommand = "find '%s' -maxdepth 1 -name '._cfg????_%s'" % \
12650                                         os.path.split(x.rstrip(os.path.sep))
12651                         mycommand += " ! -name '.*~' ! -iname '.*.bak' -print0"
12652                         a = commands.getstatusoutput(mycommand)
12653                         if a[0] != 0:
12654                                 sys.stderr.write(" %s error scanning '%s': " % (bad("*"), x))
12655                                 sys.stderr.flush()
12656                                 # Show the error message alone, sending stdout to /dev/null.
12657                                 os.system(mycommand + " 1>/dev/null")
12658                         else:
12659                                 files = a[1].split('\0')
12660                                 # split always produces an empty string as the last element
12661                                 if files and not files[-1]:
12662                                         del files[-1]
12663                                 if files:
12664                                         procount += 1
12665                                         print "\n"+colorize("WARN", " * IMPORTANT:"),
12666                                         if stat.S_ISDIR(mymode):
12667                                                  print "%d config files in '%s' need updating." % \
12668                                                         (len(files), x)
12669                                         else:
12670                                                  print "config file '%s' needs updating." % x
12671
12672                 if procount:
12673                         print " "+yellow("*")+" See the "+colorize("INFORM","CONFIGURATION FILES")+ \
12674                                 " section of the " + bold("emerge")
12675                         print " "+yellow("*")+" man page to learn how to update config files."
12676
12677 def checkUpdatedNewsItems(portdb, vardb, NEWS_PATH, UNREAD_PATH, repo_id,
12678         update=False):
12679         """
12680         Examines news items in repodir + '/' + NEWS_PATH and attempts to find unread items
12681         Returns the number of unread (yet relevent) items.
12682         
12683         @param portdb: a portage tree database
12684         @type portdb: pordbapi
12685         @param vardb: an installed package database
12686         @type vardb: vardbapi
12687         @param NEWS_PATH:
12688         @type NEWS_PATH:
12689         @param UNREAD_PATH:
12690         @type UNREAD_PATH:
12691         @param repo_id:
12692         @type repo_id:
12693         @rtype: Integer
12694         @returns:
12695         1.  The number of unread but relevant news items.
12696         
12697         """
12698         from portage.news import NewsManager
12699         manager = NewsManager(portdb, vardb, NEWS_PATH, UNREAD_PATH)
12700         return manager.getUnreadItems( repo_id, update=update )
12701
12702 def insert_category_into_atom(atom, category):
12703         alphanum = re.search(r'\w', atom)
12704         if alphanum:
12705                 ret = atom[:alphanum.start()] + "%s/" % category + \
12706                         atom[alphanum.start():]
12707         else:
12708                 ret = None
12709         return ret
12710
12711 def is_valid_package_atom(x):
12712         if "/" not in x:
12713                 alphanum = re.search(r'\w', x)
12714                 if alphanum:
12715                         x = x[:alphanum.start()] + "cat/" + x[alphanum.start():]
12716         return portage.isvalidatom(x)
12717
12718 def show_blocker_docs_link():
12719         print
12720         print "For more information about " + bad("Blocked Packages") + ", please refer to the following"
12721         print "section of the Gentoo Linux x86 Handbook (architecture is irrelevant):"
12722         print
12723         print "http://www.gentoo.org/doc/en/handbook/handbook-x86.xml?full=1#blocked"
12724         print
12725
12726 def show_mask_docs():
12727         print "For more information, see the MASKED PACKAGES section in the emerge"
12728         print "man page or refer to the Gentoo Handbook."
12729
12730 def action_sync(settings, trees, mtimedb, myopts, myaction):
12731         xterm_titles = "notitles" not in settings.features
12732         emergelog(xterm_titles, " === sync")
12733         myportdir = settings.get("PORTDIR", None)
12734         out = portage.output.EOutput()
12735         if not myportdir:
12736                 sys.stderr.write("!!! PORTDIR is undefined.  Is /etc/make.globals missing?\n")
12737                 sys.exit(1)
12738         if myportdir[-1]=="/":
12739                 myportdir=myportdir[:-1]
12740         try:
12741                 st = os.stat(myportdir)
12742         except OSError:
12743                 st = None
12744         if st is None:
12745                 print ">>>",myportdir,"not found, creating it."
12746                 os.makedirs(myportdir,0755)
12747                 st = os.stat(myportdir)
12748
12749         spawn_kwargs = {}
12750         spawn_kwargs["env"] = settings.environ()
12751         if 'usersync' in settings.features and \
12752                 portage.data.secpass >= 2 and \
12753                 (st.st_uid != os.getuid() and st.st_mode & 0700 or \
12754                 st.st_gid != os.getgid() and st.st_mode & 0070):
12755                 try:
12756                         homedir = pwd.getpwuid(st.st_uid).pw_dir
12757                 except KeyError:
12758                         pass
12759                 else:
12760                         # Drop privileges when syncing, in order to match
12761                         # existing uid/gid settings.
12762                         spawn_kwargs["uid"]    = st.st_uid
12763                         spawn_kwargs["gid"]    = st.st_gid
12764                         spawn_kwargs["groups"] = [st.st_gid]
12765                         spawn_kwargs["env"]["HOME"] = homedir
12766                         umask = 0002
12767                         if not st.st_mode & 0020:
12768                                 umask = umask | 0020
12769                         spawn_kwargs["umask"] = umask
12770
12771         syncuri = settings.get("SYNC", "").strip()
12772         if not syncuri:
12773                 writemsg_level("!!! SYNC is undefined. Is /etc/make.globals missing?\n",
12774                         noiselevel=-1, level=logging.ERROR)
12775                 return 1
12776
12777         vcs_dirs = frozenset([".git", ".svn", "CVS", ".hg"])
12778         vcs_dirs = vcs_dirs.intersection(os.listdir(myportdir))
12779
12780         os.umask(0022)
12781         dosyncuri = syncuri
12782         updatecache_flg = False
12783         if myaction == "metadata":
12784                 print "skipping sync"
12785                 updatecache_flg = True
12786         elif ".git" in vcs_dirs:
12787                 # Update existing git repository, and ignore the syncuri. We are
12788                 # going to trust the user and assume that the user is in the branch
12789                 # that he/she wants updated. We'll let the user manage branches with
12790                 # git directly.
12791                 if portage.process.find_binary("git") is None:
12792                         msg = ["Command not found: git",
12793                         "Type \"emerge dev-util/git\" to enable git support."]
12794                         for l in msg:
12795                                 writemsg_level("!!! %s\n" % l,
12796                                         level=logging.ERROR, noiselevel=-1)
12797                         return 1
12798                 msg = ">>> Starting git pull in %s..." % myportdir
12799                 emergelog(xterm_titles, msg )
12800                 writemsg_level(msg + "\n")
12801                 exitcode = portage.process.spawn_bash("cd %s ; git pull" % \
12802                         (portage._shell_quote(myportdir),), **spawn_kwargs)
12803                 if exitcode != os.EX_OK:
12804                         msg = "!!! git pull error in %s." % myportdir
12805                         emergelog(xterm_titles, msg)
12806                         writemsg_level(msg + "\n", level=logging.ERROR, noiselevel=-1)
12807                         return exitcode
12808                 msg = ">>> Git pull in %s successful" % myportdir
12809                 emergelog(xterm_titles, msg)
12810                 writemsg_level(msg + "\n")
12811                 exitcode = git_sync_timestamps(settings, myportdir)
12812                 if exitcode == os.EX_OK:
12813                         updatecache_flg = True
12814         elif syncuri[:8]=="rsync://":
12815                 for vcs_dir in vcs_dirs:
12816                         writemsg_level(("!!! %s appears to be under revision " + \
12817                                 "control (contains %s).\n!!! Aborting rsync sync.\n") % \
12818                                 (myportdir, vcs_dir), level=logging.ERROR, noiselevel=-1)
12819                         return 1
12820                 if not os.path.exists("/usr/bin/rsync"):
12821                         print "!!! /usr/bin/rsync does not exist, so rsync support is disabled."
12822                         print "!!! Type \"emerge net-misc/rsync\" to enable rsync support."
12823                         sys.exit(1)
12824                 mytimeout=180
12825
12826                 rsync_opts = []
12827                 if settings["PORTAGE_RSYNC_OPTS"] == "":
12828                         portage.writemsg("PORTAGE_RSYNC_OPTS empty or unset, using hardcoded defaults\n")
12829                         rsync_opts.extend([
12830                                 "--recursive",    # Recurse directories
12831                                 "--links",        # Consider symlinks
12832                                 "--safe-links",   # Ignore links outside of tree
12833                                 "--perms",        # Preserve permissions
12834                                 "--times",        # Preserive mod times
12835                                 "--compress",     # Compress the data transmitted
12836                                 "--force",        # Force deletion on non-empty dirs
12837                                 "--whole-file",   # Don't do block transfers, only entire files
12838                                 "--delete",       # Delete files that aren't in the master tree
12839                                 "--stats",        # Show final statistics about what was transfered
12840                                 "--timeout="+str(mytimeout), # IO timeout if not done in X seconds
12841                                 "--exclude=/distfiles",   # Exclude distfiles from consideration
12842                                 "--exclude=/local",       # Exclude local     from consideration
12843                                 "--exclude=/packages",    # Exclude packages  from consideration
12844                         ])
12845
12846                 else:
12847                         # The below validation is not needed when using the above hardcoded
12848                         # defaults.
12849
12850                         portage.writemsg("Using PORTAGE_RSYNC_OPTS instead of hardcoded defaults\n", 1)
12851                         rsync_opts.extend(
12852                                 shlex.split(settings.get("PORTAGE_RSYNC_OPTS","")))
12853                         for opt in ("--recursive", "--times"):
12854                                 if opt not in rsync_opts:
12855                                         portage.writemsg(yellow("WARNING:") + " adding required option " + \
12856                                         "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
12857                                         rsync_opts.append(opt)
12858         
12859                         for exclude in ("distfiles", "local", "packages"):
12860                                 opt = "--exclude=/%s" % exclude
12861                                 if opt not in rsync_opts:
12862                                         portage.writemsg(yellow("WARNING:") + \
12863                                         " adding required option %s not included in "  % opt + \
12864                                         "PORTAGE_RSYNC_OPTS (can be overridden with --exclude='!')\n")
12865                                         rsync_opts.append(opt)
12866         
12867                         if syncuri.rstrip("/").endswith(".gentoo.org/gentoo-portage"):
12868                                 def rsync_opt_startswith(opt_prefix):
12869                                         for x in rsync_opts:
12870                                                 if x.startswith(opt_prefix):
12871                                                         return True
12872                                         return False
12873
12874                                 if not rsync_opt_startswith("--timeout="):
12875                                         rsync_opts.append("--timeout=%d" % mytimeout)
12876
12877                                 for opt in ("--compress", "--whole-file"):
12878                                         if opt not in rsync_opts:
12879                                                 portage.writemsg(yellow("WARNING:") + " adding required option " + \
12880                                                 "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
12881                                                 rsync_opts.append(opt)
12882
12883                 if "--quiet" in myopts:
12884                         rsync_opts.append("--quiet")    # Shut up a lot
12885                 else:
12886                         rsync_opts.append("--verbose")  # Print filelist
12887
12888                 if "--verbose" in myopts:
12889                         rsync_opts.append("--progress")  # Progress meter for each file
12890
12891                 if "--debug" in myopts:
12892                         rsync_opts.append("--checksum") # Force checksum on all files
12893
12894                 # Real local timestamp file.
12895                 servertimestampfile = os.path.join(
12896                         myportdir, "metadata", "timestamp.chk")
12897
12898                 content = portage.util.grabfile(servertimestampfile)
12899                 mytimestamp = 0
12900                 if content:
12901                         try:
12902                                 mytimestamp = time.mktime(time.strptime(content[0],
12903                                         "%a, %d %b %Y %H:%M:%S +0000"))
12904                         except (OverflowError, ValueError):
12905                                 pass
12906                 del content
12907
12908                 try:
12909                         rsync_initial_timeout = \
12910                                 int(settings.get("PORTAGE_RSYNC_INITIAL_TIMEOUT", "15"))
12911                 except ValueError:
12912                         rsync_initial_timeout = 15
12913
12914                 try:
12915                         maxretries=int(settings["PORTAGE_RSYNC_RETRIES"])
12916                 except SystemExit, e:
12917                         raise # Needed else can't exit
12918                 except:
12919                         maxretries=3 #default number of retries
12920
12921                 retries=0
12922                 user_name, hostname, port = re.split(
12923                         "rsync://([^:/]+@)?([^:/]*)(:[0-9]+)?", syncuri, maxsplit=3)[1:4]
12924                 if port is None:
12925                         port=""
12926                 if user_name is None:
12927                         user_name=""
12928                 updatecache_flg=True
12929                 all_rsync_opts = set(rsync_opts)
12930                 extra_rsync_opts = shlex.split(
12931                         settings.get("PORTAGE_RSYNC_EXTRA_OPTS",""))
12932                 all_rsync_opts.update(extra_rsync_opts)
12933                 family = socket.AF_INET
12934                 if "-4" in all_rsync_opts or "--ipv4" in all_rsync_opts:
12935                         family = socket.AF_INET
12936                 elif socket.has_ipv6 and \
12937                         ("-6" in all_rsync_opts or "--ipv6" in all_rsync_opts):
12938                         family = socket.AF_INET6
12939                 ips=[]
12940                 SERVER_OUT_OF_DATE = -1
12941                 EXCEEDED_MAX_RETRIES = -2
12942                 while (1):
12943                         if ips:
12944                                 del ips[0]
12945                         if ips==[]:
12946                                 try:
12947                                         for addrinfo in socket.getaddrinfo(
12948                                                 hostname, None, family, socket.SOCK_STREAM):
12949                                                 if socket.has_ipv6 and addrinfo[0] == socket.AF_INET6:
12950                                                         # IPv6 addresses need to be enclosed in square brackets
12951                                                         ips.append("[%s]" % addrinfo[4][0])
12952                                                 else:
12953                                                         ips.append(addrinfo[4][0])
12954                                         from random import shuffle
12955                                         shuffle(ips)
12956                                 except SystemExit, e:
12957                                         raise # Needed else can't exit
12958                                 except Exception, e:
12959                                         print "Notice:",str(e)
12960                                         dosyncuri=syncuri
12961
12962                         if ips:
12963                                 try:
12964                                         dosyncuri = syncuri.replace(
12965                                                 "//" + user_name + hostname + port + "/",
12966                                                 "//" + user_name + ips[0] + port + "/", 1)
12967                                 except SystemExit, e:
12968                                         raise # Needed else can't exit
12969                                 except Exception, e:
12970                                         print "Notice:",str(e)
12971                                         dosyncuri=syncuri
12972
12973                         if (retries==0):
12974                                 if "--ask" in myopts:
12975                                         if userquery("Do you want to sync your Portage tree with the mirror at\n" + blue(dosyncuri) + bold("?"))=="No":
12976                                                 print
12977                                                 print "Quitting."
12978                                                 print
12979                                                 sys.exit(0)
12980                                 emergelog(xterm_titles, ">>> Starting rsync with " + dosyncuri)
12981                                 if "--quiet" not in myopts:
12982                                         print ">>> Starting rsync with "+dosyncuri+"..."
12983                         else:
12984                                 emergelog(xterm_titles,
12985                                         ">>> Starting retry %d of %d with %s" % \
12986                                                 (retries,maxretries,dosyncuri))
12987                                 print "\n\n>>> Starting retry %d of %d with %s" % (retries,maxretries,dosyncuri)
12988
12989                         if mytimestamp != 0 and "--quiet" not in myopts:
12990                                 print ">>> Checking server timestamp ..."
12991
12992                         rsynccommand = ["/usr/bin/rsync"] + rsync_opts + extra_rsync_opts
12993
12994                         if "--debug" in myopts:
12995                                 print rsynccommand
12996
12997                         exitcode = os.EX_OK
12998                         servertimestamp = 0
12999                         # Even if there's no timestamp available locally, fetch the
13000                         # timestamp anyway as an initial probe to verify that the server is
13001                         # responsive.  This protects us from hanging indefinitely on a
13002                         # connection attempt to an unresponsive server which rsync's
13003                         # --timeout option does not prevent.
13004                         if True:
13005                                 # Temporary file for remote server timestamp comparison.
13006                                 from tempfile import mkstemp
13007                                 fd, tmpservertimestampfile = mkstemp()
13008                                 os.close(fd)
13009                                 mycommand = rsynccommand[:]
13010                                 mycommand.append(dosyncuri.rstrip("/") + \
13011                                         "/metadata/timestamp.chk")
13012                                 mycommand.append(tmpservertimestampfile)
13013                                 content = None
13014                                 mypids = []
13015                                 try:
13016                                         def timeout_handler(signum, frame):
13017                                                 raise portage.exception.PortageException("timed out")
13018                                         signal.signal(signal.SIGALRM, timeout_handler)
13019                                         # Timeout here in case the server is unresponsive.  The
13020                                         # --timeout rsync option doesn't apply to the initial
13021                                         # connection attempt.
13022                                         if rsync_initial_timeout:
13023                                                 signal.alarm(rsync_initial_timeout)
13024                                         try:
13025                                                 mypids.extend(portage.process.spawn(
13026                                                         mycommand, env=settings.environ(), returnpid=True))
13027                                                 exitcode = os.waitpid(mypids[0], 0)[1]
13028                                                 content = portage.grabfile(tmpservertimestampfile)
13029                                         finally:
13030                                                 if rsync_initial_timeout:
13031                                                         signal.alarm(0)
13032                                                 try:
13033                                                         os.unlink(tmpservertimestampfile)
13034                                                 except OSError:
13035                                                         pass
13036                                 except portage.exception.PortageException, e:
13037                                         # timed out
13038                                         print e
13039                                         del e
13040                                         if mypids and os.waitpid(mypids[0], os.WNOHANG) == (0,0):
13041                                                 os.kill(mypids[0], signal.SIGTERM)
13042                                                 os.waitpid(mypids[0], 0)
13043                                         # This is the same code rsync uses for timeout.
13044                                         exitcode = 30
13045                                 else:
13046                                         if exitcode != os.EX_OK:
13047                                                 if exitcode & 0xff:
13048                                                         exitcode = (exitcode & 0xff) << 8
13049                                                 else:
13050                                                         exitcode = exitcode >> 8
13051                                 if mypids:
13052                                         portage.process.spawned_pids.remove(mypids[0])
13053                                 if content:
13054                                         try:
13055                                                 servertimestamp = time.mktime(time.strptime(
13056                                                         content[0], "%a, %d %b %Y %H:%M:%S +0000"))
13057                                         except (OverflowError, ValueError):
13058                                                 pass
13059                                 del mycommand, mypids, content
13060                         if exitcode == os.EX_OK:
13061                                 if (servertimestamp != 0) and (servertimestamp == mytimestamp):
13062                                         emergelog(xterm_titles,
13063                                                 ">>> Cancelling sync -- Already current.")
13064                                         print
13065                                         print ">>>"
13066                                         print ">>> Timestamps on the server and in the local repository are the same."
13067                                         print ">>> Cancelling all further sync action. You are already up to date."
13068                                         print ">>>"
13069                                         print ">>> In order to force sync, remove '%s'." % servertimestampfile
13070                                         print ">>>"
13071                                         print
13072                                         sys.exit(0)
13073                                 elif (servertimestamp != 0) and (servertimestamp < mytimestamp):
13074                                         emergelog(xterm_titles,
13075                                                 ">>> Server out of date: %s" % dosyncuri)
13076                                         print
13077                                         print ">>>"
13078                                         print ">>> SERVER OUT OF DATE: %s" % dosyncuri
13079                                         print ">>>"
13080                                         print ">>> In order to force sync, remove '%s'." % servertimestampfile
13081                                         print ">>>"
13082                                         print
13083                                         exitcode = SERVER_OUT_OF_DATE
13084                                 elif (servertimestamp == 0) or (servertimestamp > mytimestamp):
13085                                         # actual sync
13086                                         mycommand = rsynccommand + [dosyncuri+"/", myportdir]
13087                                         exitcode = portage.process.spawn(mycommand, **spawn_kwargs)
13088                                         if exitcode in [0,1,3,4,11,14,20,21]:
13089                                                 break
13090                         elif exitcode in [1,3,4,11,14,20,21]:
13091                                 break
13092                         else:
13093                                 # Code 2 indicates protocol incompatibility, which is expected
13094                                 # for servers with protocol < 29 that don't support
13095                                 # --prune-empty-directories.  Retry for a server that supports
13096                                 # at least rsync protocol version 29 (>=rsync-2.6.4).
13097                                 pass
13098
13099                         retries=retries+1
13100
13101                         if retries<=maxretries:
13102                                 print ">>> Retrying..."
13103                                 time.sleep(11)
13104                         else:
13105                                 # over retries
13106                                 # exit loop
13107                                 updatecache_flg=False
13108                                 exitcode = EXCEEDED_MAX_RETRIES
13109                                 break
13110
13111                 if (exitcode==0):
13112                         emergelog(xterm_titles, "=== Sync completed with %s" % dosyncuri)
13113                 elif exitcode == SERVER_OUT_OF_DATE:
13114                         sys.exit(1)
13115                 elif exitcode == EXCEEDED_MAX_RETRIES:
13116                         sys.stderr.write(
13117                                 ">>> Exceeded PORTAGE_RSYNC_RETRIES: %s\n" % maxretries)
13118                         sys.exit(1)
13119                 elif (exitcode>0):
13120                         msg = []
13121                         if exitcode==1:
13122                                 msg.append("Rsync has reported that there is a syntax error. Please ensure")
13123                                 msg.append("that your SYNC statement is proper.")
13124                                 msg.append("SYNC=" + settings["SYNC"])
13125                         elif exitcode==11:
13126                                 msg.append("Rsync has reported that there is a File IO error. Normally")
13127                                 msg.append("this means your disk is full, but can be caused by corruption")
13128                                 msg.append("on the filesystem that contains PORTDIR. Please investigate")
13129                                 msg.append("and try again after the problem has been fixed.")
13130                                 msg.append("PORTDIR=" + settings["PORTDIR"])
13131                         elif exitcode==20:
13132                                 msg.append("Rsync was killed before it finished.")
13133                         else:
13134                                 msg.append("Rsync has not successfully finished. It is recommended that you keep")
13135                                 msg.append("trying or that you use the 'emerge-webrsync' option if you are unable")
13136                                 msg.append("to use rsync due to firewall or other restrictions. This should be a")
13137                                 msg.append("temporary problem unless complications exist with your network")
13138                                 msg.append("(and possibly your system's filesystem) configuration.")
13139                         for line in msg:
13140                                 out.eerror(line)
13141                         sys.exit(exitcode)
13142         elif syncuri[:6]=="cvs://":
13143                 if not os.path.exists("/usr/bin/cvs"):
13144                         print "!!! /usr/bin/cvs does not exist, so CVS support is disabled."
13145                         print "!!! Type \"emerge dev-util/cvs\" to enable CVS support."
13146                         sys.exit(1)
13147                 cvsroot=syncuri[6:]
13148                 cvsdir=os.path.dirname(myportdir)
13149                 if not os.path.exists(myportdir+"/CVS"):
13150                         #initial checkout
13151                         print ">>> Starting initial cvs checkout with "+syncuri+"..."
13152                         if os.path.exists(cvsdir+"/gentoo-x86"):
13153                                 print "!!! existing",cvsdir+"/gentoo-x86 directory; exiting."
13154                                 sys.exit(1)
13155                         try:
13156                                 os.rmdir(myportdir)
13157                         except OSError, e:
13158                                 if e.errno != errno.ENOENT:
13159                                         sys.stderr.write(
13160                                                 "!!! existing '%s' directory; exiting.\n" % myportdir)
13161                                         sys.exit(1)
13162                                 del e
13163                         if portage.spawn("cd "+cvsdir+"; cvs -z0 -d "+cvsroot+" co -P gentoo-x86",settings,free=1):
13164                                 print "!!! cvs checkout error; exiting."
13165                                 sys.exit(1)
13166                         os.rename(os.path.join(cvsdir, "gentoo-x86"), myportdir)
13167                 else:
13168                         #cvs update
13169                         print ">>> Starting cvs update with "+syncuri+"..."
13170                         retval = portage.process.spawn_bash(
13171                                 "cd %s; cvs -z0 -q update -dP" % \
13172                                 (portage._shell_quote(myportdir),), **spawn_kwargs)
13173                         if retval != os.EX_OK:
13174                                 sys.exit(retval)
13175                 dosyncuri = syncuri
13176         else:
13177                 writemsg_level("!!! Unrecognized protocol: SYNC='%s'\n" % (syncuri,),
13178                         noiselevel=-1, level=logging.ERROR)
13179                 return 1
13180
13181         if updatecache_flg and  \
13182                 myaction != "metadata" and \
13183                 "metadata-transfer" not in settings.features:
13184                 updatecache_flg = False
13185
13186         # Reload the whole config from scratch.
13187         settings, trees, mtimedb = load_emerge_config(trees=trees)
13188         root_config = trees[settings["ROOT"]]["root_config"]
13189         portdb = trees[settings["ROOT"]]["porttree"].dbapi
13190
13191         if os.path.exists(myportdir+"/metadata/cache") and updatecache_flg:
13192                 action_metadata(settings, portdb, myopts)
13193
13194         if portage._global_updates(trees, mtimedb["updates"]):
13195                 mtimedb.commit()
13196                 # Reload the whole config from scratch.
13197                 settings, trees, mtimedb = load_emerge_config(trees=trees)
13198                 portdb = trees[settings["ROOT"]]["porttree"].dbapi
13199                 root_config = trees[settings["ROOT"]]["root_config"]
13200
13201         mybestpv = portdb.xmatch("bestmatch-visible",
13202                 portage.const.PORTAGE_PACKAGE_ATOM)
13203         mypvs = portage.best(
13204                 trees[settings["ROOT"]]["vartree"].dbapi.match(
13205                 portage.const.PORTAGE_PACKAGE_ATOM))
13206
13207         chk_updated_cfg_files("/", settings.get("CONFIG_PROTECT","").split())
13208
13209         if myaction != "metadata":
13210                 if os.access(portage.USER_CONFIG_PATH + "/bin/post_sync", os.X_OK):
13211                         retval = portage.process.spawn(
13212                                 [os.path.join(portage.USER_CONFIG_PATH, "bin", "post_sync"),
13213                                 dosyncuri], env=settings.environ())
13214                         if retval != os.EX_OK:
13215                                 print red(" * ")+bold("spawn failed of "+ portage.USER_CONFIG_PATH + "/bin/post_sync")
13216
13217         if(mybestpv != mypvs) and not "--quiet" in myopts:
13218                 print
13219                 print red(" * ")+bold("An update to portage is available.")+" It is _highly_ recommended"
13220                 print red(" * ")+"that you update portage now, before any other packages are updated."
13221                 print
13222                 print red(" * ")+"To update portage, run 'emerge portage' now."
13223                 print
13224         
13225         display_news_notification(root_config, myopts)
13226         return os.EX_OK
13227
13228 def git_sync_timestamps(settings, portdir):
13229         """
13230         Since git doesn't preserve timestamps, synchronize timestamps between
13231         entries and ebuilds/eclasses. Assume the cache has the correct timestamp
13232         for a given file as long as the file in the working tree is not modified
13233         (relative to HEAD).
13234         """
13235         cache_dir = os.path.join(portdir, "metadata", "cache")
13236         if not os.path.isdir(cache_dir):
13237                 return os.EX_OK
13238         writemsg_level(">>> Synchronizing timestamps...\n")
13239
13240         from portage.cache.cache_errors import CacheError
13241         try:
13242                 cache_db = settings.load_best_module("portdbapi.metadbmodule")(
13243                         portdir, "metadata/cache", portage.auxdbkeys[:], readonly=True)
13244         except CacheError, e:
13245                 writemsg_level("!!! Unable to instantiate cache: %s\n" % (e,),
13246                         level=logging.ERROR, noiselevel=-1)
13247                 return 1
13248
13249         ec_dir = os.path.join(portdir, "eclass")
13250         try:
13251                 ec_names = set(f[:-7] for f in os.listdir(ec_dir) \
13252                         if f.endswith(".eclass"))
13253         except OSError, e:
13254                 writemsg_level("!!! Unable to list eclasses: %s\n" % (e,),
13255                         level=logging.ERROR, noiselevel=-1)
13256                 return 1
13257
13258         args = [portage.const.BASH_BINARY, "-c",
13259                 "cd %s && git diff-index --name-only --diff-filter=M HEAD" % \
13260                 portage._shell_quote(portdir)]
13261         import subprocess
13262         proc = subprocess.Popen(args, stdout=subprocess.PIPE)
13263         modified_files = set(l.rstrip("\n") for l in proc.stdout)
13264         rval = proc.wait()
13265         if rval != os.EX_OK:
13266                 return rval
13267
13268         modified_eclasses = set(ec for ec in ec_names \
13269                 if os.path.join("eclass", ec + ".eclass") in modified_files)
13270
13271         updated_ec_mtimes = {}
13272
13273         for cpv in cache_db:
13274                 cpv_split = portage.catpkgsplit(cpv)
13275                 if cpv_split is None:
13276                         writemsg_level("!!! Invalid cache entry: %s\n" % (cpv,),
13277                                 level=logging.ERROR, noiselevel=-1)
13278                         continue
13279
13280                 cat, pn, ver, rev = cpv_split
13281                 cat, pf = portage.catsplit(cpv)
13282                 relative_eb_path = os.path.join(cat, pn, pf + ".ebuild")
13283                 if relative_eb_path in modified_files:
13284                         continue
13285
13286                 try:
13287                         cache_entry = cache_db[cpv]
13288                         eb_mtime = cache_entry.get("_mtime_")
13289                         ec_mtimes = cache_entry.get("_eclasses_")
13290                 except KeyError:
13291                         writemsg_level("!!! Missing cache entry: %s\n" % (cpv,),
13292                                 level=logging.ERROR, noiselevel=-1)
13293                         continue
13294                 except CacheError, e:
13295                         writemsg_level("!!! Unable to access cache entry: %s %s\n" % \
13296                                 (cpv, e), level=logging.ERROR, noiselevel=-1)
13297                         continue
13298
13299                 if eb_mtime is None:
13300                         writemsg_level("!!! Missing ebuild mtime: %s\n" % (cpv,),
13301                                 level=logging.ERROR, noiselevel=-1)
13302                         continue
13303
13304                 try:
13305                         eb_mtime = long(eb_mtime)
13306                 except ValueError:
13307                         writemsg_level("!!! Invalid ebuild mtime: %s %s\n" % \
13308                                 (cpv, eb_mtime), level=logging.ERROR, noiselevel=-1)
13309                         continue
13310
13311                 if ec_mtimes is None:
13312                         writemsg_level("!!! Missing eclass mtimes: %s\n" % (cpv,),
13313                                 level=logging.ERROR, noiselevel=-1)
13314                         continue
13315
13316                 if modified_eclasses.intersection(ec_mtimes):
13317                         continue
13318
13319                 missing_eclasses = set(ec_mtimes).difference(ec_names)
13320                 if missing_eclasses:
13321                         writemsg_level("!!! Non-existent eclass(es): %s %s\n" % \
13322                                 (cpv, sorted(missing_eclasses)), level=logging.ERROR,
13323                                 noiselevel=-1)
13324                         continue
13325
13326                 eb_path = os.path.join(portdir, relative_eb_path)
13327                 try:
13328                         current_eb_mtime = os.stat(eb_path)
13329                 except OSError:
13330                         writemsg_level("!!! Missing ebuild: %s\n" % \
13331                                 (cpv,), level=logging.ERROR, noiselevel=-1)
13332                         continue
13333
13334                 inconsistent = False
13335                 for ec, (ec_path, ec_mtime) in ec_mtimes.iteritems():
13336                         updated_mtime = updated_ec_mtimes.get(ec)
13337                         if updated_mtime is not None and updated_mtime != ec_mtime:
13338                                 writemsg_level("!!! Inconsistent eclass mtime: %s %s\n" % \
13339                                         (cpv, ec), level=logging.ERROR, noiselevel=-1)
13340                                 inconsistent = True
13341                                 break
13342
13343                 if inconsistent:
13344                         continue
13345
13346                 if current_eb_mtime != eb_mtime:
13347                         os.utime(eb_path, (eb_mtime, eb_mtime))
13348
13349                 for ec, (ec_path, ec_mtime) in ec_mtimes.iteritems():
13350                         if ec in updated_ec_mtimes:
13351                                 continue
13352                         ec_path = os.path.join(ec_dir, ec + ".eclass")
13353                         current_mtime = long(os.stat(ec_path).st_mtime)
13354                         if current_mtime != ec_mtime:
13355                                 os.utime(ec_path, (ec_mtime, ec_mtime))
13356                         updated_ec_mtimes[ec] = ec_mtime
13357
13358         return os.EX_OK
13359
13360 def action_metadata(settings, portdb, myopts):
13361         portage.writemsg_stdout("\n>>> Updating Portage cache:      ")
13362         old_umask = os.umask(0002)
13363         cachedir = os.path.normpath(settings.depcachedir)
13364         if cachedir in ["/",    "/bin", "/dev",  "/etc",  "/home",
13365                                         "/lib", "/opt", "/proc", "/root", "/sbin",
13366                                         "/sys", "/tmp", "/usr",  "/var"]:
13367                 print >> sys.stderr, "!!! PORTAGE_DEPCACHEDIR IS SET TO A PRIMARY " + \
13368                         "ROOT DIRECTORY ON YOUR SYSTEM."
13369                 print >> sys.stderr, \
13370                         "!!! This is ALMOST CERTAINLY NOT what you want: '%s'" % cachedir
13371                 sys.exit(73)
13372         if not os.path.exists(cachedir):
13373                 os.mkdir(cachedir)
13374
13375         ec = portage.eclass_cache.cache(portdb.porttree_root)
13376         myportdir = os.path.realpath(settings["PORTDIR"])
13377         cm = settings.load_best_module("portdbapi.metadbmodule")(
13378                 myportdir, "metadata/cache", portage.auxdbkeys[:], readonly=True)
13379
13380         from portage.cache import util
13381
13382         class percentage_noise_maker(util.quiet_mirroring):
13383                 def __init__(self, dbapi):
13384                         self.dbapi = dbapi
13385                         self.cp_all = dbapi.cp_all()
13386                         l = len(self.cp_all)
13387                         self.call_update_min = 100000000
13388                         self.min_cp_all = l/100.0
13389                         self.count = 1
13390                         self.pstr = ''
13391
13392                 def __iter__(self):
13393                         for x in self.cp_all:
13394                                 self.count += 1
13395                                 if self.count > self.min_cp_all:
13396                                         self.call_update_min = 0
13397                                         self.count = 0
13398                                 for y in self.dbapi.cp_list(x):
13399                                         yield y
13400                         self.call_update_mine = 0
13401
13402                 def update(self, *arg):
13403                         try:
13404                                 self.pstr = int(self.pstr) + 1
13405                         except ValueError:
13406                                 self.pstr = 1
13407                         sys.stdout.write("%s%i%%" % \
13408                                 ("\b" * (len(str(self.pstr))+1), self.pstr))
13409                         sys.stdout.flush()
13410                         self.call_update_min = 10000000
13411
13412                 def finish(self, *arg):
13413                         sys.stdout.write("\b\b\b\b100%\n")
13414                         sys.stdout.flush()
13415
13416         if "--quiet" in myopts:
13417                 def quicky_cpv_generator(cp_all_list):
13418                         for x in cp_all_list:
13419                                 for y in portdb.cp_list(x):
13420                                         yield y
13421                 source = quicky_cpv_generator(portdb.cp_all())
13422                 noise_maker = portage.cache.util.quiet_mirroring()
13423         else:
13424                 noise_maker = source = percentage_noise_maker(portdb)
13425         portage.cache.util.mirror_cache(source, cm, portdb.auxdb[myportdir],
13426                 eclass_cache=ec, verbose_instance=noise_maker)
13427
13428         sys.stdout.flush()
13429         os.umask(old_umask)
13430
13431 def action_regen(settings, portdb, max_jobs, max_load):
13432         xterm_titles = "notitles" not in settings.features
13433         emergelog(xterm_titles, " === regen")
13434         #regenerate cache entries
13435         portage.writemsg_stdout("Regenerating cache entries...\n")
13436         try:
13437                 os.close(sys.stdin.fileno())
13438         except SystemExit, e:
13439                 raise # Needed else can't exit
13440         except:
13441                 pass
13442         sys.stdout.flush()
13443
13444         regen = MetadataRegen(portdb, max_jobs=max_jobs, max_load=max_load)
13445         regen.run()
13446
13447         portage.writemsg_stdout("done!\n")
13448         return regen.returncode
13449
13450 def action_config(settings, trees, myopts, myfiles):
13451         if len(myfiles) != 1:
13452                 print red("!!! config can only take a single package atom at this time\n")
13453                 sys.exit(1)
13454         if not is_valid_package_atom(myfiles[0]):
13455                 portage.writemsg("!!! '%s' is not a valid package atom.\n" % myfiles[0],
13456                         noiselevel=-1)
13457                 portage.writemsg("!!! Please check ebuild(5) for full details.\n")
13458                 portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
13459                 sys.exit(1)
13460         print
13461         try:
13462                 pkgs = trees[settings["ROOT"]]["vartree"].dbapi.match(myfiles[0])
13463         except portage.exception.AmbiguousPackageName, e:
13464                 # Multiple matches thrown from cpv_expand
13465                 pkgs = e.args[0]
13466         if len(pkgs) == 0:
13467                 print "No packages found.\n"
13468                 sys.exit(0)
13469         elif len(pkgs) > 1:
13470                 if "--ask" in myopts:
13471                         options = []
13472                         print "Please select a package to configure:"
13473                         idx = 0
13474                         for pkg in pkgs:
13475                                 idx += 1
13476                                 options.append(str(idx))
13477                                 print options[-1]+") "+pkg
13478                         print "X) Cancel"
13479                         options.append("X")
13480                         idx = userquery("Selection?", options)
13481                         if idx == "X":
13482                                 sys.exit(0)
13483                         pkg = pkgs[int(idx)-1]
13484                 else:
13485                         print "The following packages available:"
13486                         for pkg in pkgs:
13487                                 print "* "+pkg
13488                         print "\nPlease use a specific atom or the --ask option."
13489                         sys.exit(1)
13490         else:
13491                 pkg = pkgs[0]
13492
13493         print
13494         if "--ask" in myopts:
13495                 if userquery("Ready to configure "+pkg+"?") == "No":
13496                         sys.exit(0)
13497         else:
13498                 print "Configuring pkg..."
13499         print
13500         ebuildpath = trees[settings["ROOT"]]["vartree"].dbapi.findname(pkg)
13501         mysettings = portage.config(clone=settings)
13502         vardb = trees[mysettings["ROOT"]]["vartree"].dbapi
13503         debug = mysettings.get("PORTAGE_DEBUG") == "1"
13504         retval = portage.doebuild(ebuildpath, "config", mysettings["ROOT"],
13505                 mysettings,
13506                 debug=(settings.get("PORTAGE_DEBUG", "") == 1), cleanup=True,
13507                 mydbapi=trees[settings["ROOT"]]["vartree"].dbapi, tree="vartree")
13508         if retval == os.EX_OK:
13509                 portage.doebuild(ebuildpath, "clean", mysettings["ROOT"],
13510                         mysettings, debug=debug, mydbapi=vardb, tree="vartree")
13511         print
13512
13513 def action_info(settings, trees, myopts, myfiles):
13514         print getportageversion(settings["PORTDIR"], settings["ROOT"],
13515                 settings.profile_path, settings["CHOST"],
13516                 trees[settings["ROOT"]]["vartree"].dbapi)
13517         header_width = 65
13518         header_title = "System Settings"
13519         if myfiles:
13520                 print header_width * "="
13521                 print header_title.rjust(int(header_width/2 + len(header_title)/2))
13522         print header_width * "="
13523         print "System uname: "+platform.platform(aliased=1)
13524
13525         lastSync = portage.grabfile(os.path.join(
13526                 settings["PORTDIR"], "metadata", "timestamp.chk"))
13527         print "Timestamp of tree:",
13528         if lastSync:
13529                 print lastSync[0]
13530         else:
13531                 print "Unknown"
13532
13533         output=commands.getstatusoutput("distcc --version")
13534         if not output[0]:
13535                 print str(output[1].split("\n",1)[0]),
13536                 if "distcc" in settings.features:
13537                         print "[enabled]"
13538                 else:
13539                         print "[disabled]"
13540
13541         output=commands.getstatusoutput("ccache -V")
13542         if not output[0]:
13543                 print str(output[1].split("\n",1)[0]),
13544                 if "ccache" in settings.features:
13545                         print "[enabled]"
13546                 else:
13547                         print "[disabled]"
13548
13549         myvars  = ["sys-devel/autoconf", "sys-devel/automake", "virtual/os-headers",
13550                    "sys-devel/binutils", "sys-devel/libtool",  "dev-lang/python"]
13551         myvars += portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_pkgs")
13552         myvars  = portage.util.unique_array(myvars)
13553         myvars.sort()
13554
13555         for x in myvars:
13556                 if portage.isvalidatom(x):
13557                         pkg_matches = trees["/"]["vartree"].dbapi.match(x)
13558                         pkg_matches = [portage.catpkgsplit(cpv)[1:] for cpv in pkg_matches]
13559                         pkg_matches.sort(key=cmp_sort_key(portage.pkgcmp))
13560                         pkgs = []
13561                         for pn, ver, rev in pkg_matches:
13562                                 if rev != "r0":
13563                                         pkgs.append(ver + "-" + rev)
13564                                 else:
13565                                         pkgs.append(ver)
13566                         if pkgs:
13567                                 pkgs = ", ".join(pkgs)
13568                                 print "%-20s %s" % (x+":", pkgs)
13569                 else:
13570                         print "%-20s %s" % (x+":", "[NOT VALID]")
13571
13572         libtool_vers = ",".join(trees["/"]["vartree"].dbapi.match("sys-devel/libtool"))
13573
13574         if "--verbose" in myopts:
13575                 myvars=settings.keys()
13576         else:
13577                 myvars = ['GENTOO_MIRRORS', 'CONFIG_PROTECT', 'CONFIG_PROTECT_MASK',
13578                           'PORTDIR', 'DISTDIR', 'PKGDIR', 'PORTAGE_TMPDIR',
13579                           'PORTDIR_OVERLAY', 'USE', 'CHOST', 'CFLAGS', 'CXXFLAGS',
13580                           'ACCEPT_KEYWORDS', 'SYNC', 'FEATURES', 'EMERGE_DEFAULT_OPTS']
13581
13582                 myvars.extend(portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_vars"))
13583
13584         myvars = portage.util.unique_array(myvars)
13585         use_expand = settings.get('USE_EXPAND', '').split()
13586         use_expand.sort()
13587         use_expand_hidden = set(
13588                 settings.get('USE_EXPAND_HIDDEN', '').upper().split())
13589         alphabetical_use = '--alphabetical' in myopts
13590         root_config = trees[settings["ROOT"]]['root_config']
13591         unset_vars = []
13592         myvars.sort()
13593         for x in myvars:
13594                 if x in settings:
13595                         if x != "USE":
13596                                 print '%s="%s"' % (x, settings[x])
13597                         else:
13598                                 use = set(settings["USE"].split())
13599                                 for varname in use_expand:
13600                                         flag_prefix = varname.lower() + "_"
13601                                         for f in list(use):
13602                                                 if f.startswith(flag_prefix):
13603                                                         use.remove(f)
13604                                 use = list(use)
13605                                 use.sort()
13606                                 print 'USE="%s"' % " ".join(use),
13607                                 for varname in use_expand:
13608                                         myval = settings.get(varname)
13609                                         if myval:
13610                                                 print '%s="%s"' % (varname, myval),
13611                                 print
13612                 else:
13613                         unset_vars.append(x)
13614         if unset_vars:
13615                 print "Unset:  "+", ".join(unset_vars)
13616         print
13617
13618         if "--debug" in myopts:
13619                 for x in dir(portage):
13620                         module = getattr(portage, x)
13621                         if "cvs_id_string" in dir(module):
13622                                 print "%s: %s" % (str(x), str(module.cvs_id_string))
13623
13624         # See if we can find any packages installed matching the strings
13625         # passed on the command line
13626         mypkgs = []
13627         vardb = trees[settings["ROOT"]]["vartree"].dbapi
13628         portdb = trees[settings["ROOT"]]["porttree"].dbapi
13629         for x in myfiles:
13630                 mypkgs.extend(vardb.match(x))
13631
13632         # If some packages were found...
13633         if mypkgs:
13634                 # Get our global settings (we only print stuff if it varies from
13635                 # the current config)
13636                 mydesiredvars = [ 'CHOST', 'CFLAGS', 'CXXFLAGS', 'LDFLAGS' ]
13637                 auxkeys = mydesiredvars + list(vardb._aux_cache_keys)
13638                 auxkeys.append('DEFINED_PHASES')
13639                 global_vals = {}
13640                 pkgsettings = portage.config(clone=settings)
13641
13642                 for myvar in mydesiredvars:
13643                         global_vals[myvar] = set(settings.get(myvar, "").split())
13644
13645                 # Loop through each package
13646                 # Only print settings if they differ from global settings
13647                 header_title = "Package Settings"
13648                 print header_width * "="
13649                 print header_title.rjust(int(header_width/2 + len(header_title)/2))
13650                 print header_width * "="
13651                 from portage.output import EOutput
13652                 out = EOutput()
13653                 for cpv in mypkgs:
13654                         # Get all package specific variables
13655                         metadata = dict(izip(auxkeys, vardb.aux_get(cpv, auxkeys)))
13656                         pkg = Package(built=True, cpv=cpv,
13657                                 installed=True, metadata=izip(Package.metadata_keys,
13658                                 (metadata.get(x, '') for x in Package.metadata_keys)),
13659                                 root_config=root_config, type_name='installed')
13660                         valuesmap = {}
13661                         for k in auxkeys:
13662                                 valuesmap[k] = set(metadata[k].split())
13663
13664                         diff_values = {}
13665                         for myvar in mydesiredvars:
13666                                 # If the package variable doesn't match the
13667                                 # current global variable, something has changed
13668                                 # so set diff_found so we know to print
13669                                 if valuesmap[myvar] != global_vals[myvar]:
13670                                         diff_values[myvar] = valuesmap[myvar]
13671
13672                         print "\n%s was built with the following:" % \
13673                                 colorize("INFORM", str(pkg.cpv))
13674
13675                         pkgsettings.setcpv(pkg)
13676                         forced_flags = set(chain(pkgsettings.useforce,
13677                                 pkgsettings.usemask))
13678                         use = set(pkg.use.enabled)
13679                         use.discard(pkgsettings.get('ARCH'))
13680                         use_expand_flags = set()
13681                         use_enabled = {}
13682                         use_disabled = {}
13683                         for varname in use_expand:
13684                                 flag_prefix = varname.lower() + "_"
13685                                 for f in use:
13686                                         if f.startswith(flag_prefix):
13687                                                 use_expand_flags.add(f)
13688                                                 use_enabled.setdefault(
13689                                                         varname.upper(), []).append(f[len(flag_prefix):])
13690
13691                                 for f in pkg.iuse.all:
13692                                         if f.startswith(flag_prefix):
13693                                                 use_expand_flags.add(f)
13694                                                 if f not in use:
13695                                                         use_disabled.setdefault(
13696                                                                 varname.upper(), []).append(f[len(flag_prefix):])
13697
13698                         var_order = set(use_enabled)
13699                         var_order.update(use_disabled)
13700                         var_order = sorted(var_order)
13701                         var_order.insert(0, 'USE')
13702                         use.difference_update(use_expand_flags)
13703                         use_enabled['USE'] = list(use)
13704                         use_disabled['USE'] = []
13705
13706                         for f in pkg.iuse.all:
13707                                 if f not in use and \
13708                                         f not in use_expand_flags:
13709                                         use_disabled['USE'].append(f)
13710
13711                         for varname in var_order:
13712                                 if varname in use_expand_hidden:
13713                                         continue
13714                                 flags = []
13715                                 for f in use_enabled.get(varname, []):
13716                                         flags.append(UseFlagDisplay(f, True, f in forced_flags))
13717                                 for f in use_disabled.get(varname, []):
13718                                         flags.append(UseFlagDisplay(f, False, f in forced_flags))
13719                                 if alphabetical_use:
13720                                         flags.sort(key=UseFlagDisplay.sort_combined)
13721                                 else:
13722                                         flags.sort(key=UseFlagDisplay.sort_separated)
13723                                 print '%s="%s"' % (varname, ' '.join(str(f) for f in flags)),
13724                         print
13725
13726                         # If a difference was found, print the info for
13727                         # this package.
13728                         if diff_values:
13729                                 # Print package info
13730                                 for myvar in mydesiredvars:
13731                                         if myvar in diff_values:
13732                                                 mylist = list(diff_values[myvar])
13733                                                 mylist.sort()
13734                                                 print "%s=\"%s\"" % (myvar, " ".join(mylist))
13735                         print
13736
13737                         if metadata['DEFINED_PHASES']:
13738                                 if 'info' not in metadata['DEFINED_PHASES'].split():
13739                                         continue
13740
13741                         print ">>> Attempting to run pkg_info() for '%s'" % pkg.cpv
13742                         ebuildpath = vardb.findname(pkg.cpv)
13743                         if not ebuildpath or not os.path.exists(ebuildpath):
13744                                 out.ewarn("No ebuild found for '%s'" % pkg.cpv)
13745                                 continue
13746                         portage.doebuild(ebuildpath, "info", pkgsettings["ROOT"],
13747                                 pkgsettings, debug=(settings.get("PORTAGE_DEBUG", "") == 1),
13748                                 mydbapi=trees[settings["ROOT"]]["vartree"].dbapi,
13749                                 tree="vartree")
13750
13751 def action_search(root_config, myopts, myfiles, spinner):
13752         if not myfiles:
13753                 print "emerge: no search terms provided."
13754         else:
13755                 searchinstance = search(root_config,
13756                         spinner, "--searchdesc" in myopts,
13757                         "--quiet" not in myopts, "--usepkg" in myopts,
13758                         "--usepkgonly" in myopts)
13759                 for mysearch in myfiles:
13760                         try:
13761                                 searchinstance.execute(mysearch)
13762                         except re.error, comment:
13763                                 print "\n!!! Regular expression error in \"%s\": %s" % ( mysearch, comment )
13764                                 sys.exit(1)
13765                         searchinstance.output()
13766
13767 def action_depclean(settings, trees, ldpath_mtimes,
13768         myopts, action, myfiles, spinner):
13769         # Kill packages that aren't explicitly merged or are required as a
13770         # dependency of another package. World file is explicit.
13771
13772         # Global depclean or prune operations are not very safe when there are
13773         # missing dependencies since it's unknown how badly incomplete
13774         # the dependency graph is, and we might accidentally remove packages
13775         # that should have been pulled into the graph. On the other hand, it's
13776         # relatively safe to ignore missing deps when only asked to remove
13777         # specific packages.
13778         allow_missing_deps = len(myfiles) > 0
13779
13780         msg = []
13781         msg.append("Always study the list of packages to be cleaned for any obvious\n")
13782         msg.append("mistakes. Packages that are part of the world set will always\n")
13783         msg.append("be kept.  They can be manually added to this set with\n")
13784         msg.append(good("`emerge --noreplace <atom>`") + ".  Packages that are listed in\n")
13785         msg.append("package.provided (see portage(5)) will be removed by\n")
13786         msg.append("depclean, even if they are part of the world set.\n")
13787         msg.append("\n")
13788         msg.append("As a safety measure, depclean will not remove any packages\n")
13789         msg.append("unless *all* required dependencies have been resolved.  As a\n")
13790         msg.append("consequence, it is often necessary to run %s\n" % \
13791                 good("`emerge --update"))
13792         msg.append(good("--newuse --deep @system @world`") + \
13793                 " prior to depclean.\n")
13794
13795         if action == "depclean" and "--quiet" not in myopts and not myfiles:
13796                 portage.writemsg_stdout("\n")
13797                 for x in msg:
13798                         portage.writemsg_stdout(colorize("WARN", " * ") + x)
13799
13800         xterm_titles = "notitles" not in settings.features
13801         myroot = settings["ROOT"]
13802         root_config = trees[myroot]["root_config"]
13803         getSetAtoms = root_config.setconfig.getSetAtoms
13804         vardb = trees[myroot]["vartree"].dbapi
13805
13806         required_set_names = ("system", "world")
13807         required_sets = {}
13808         set_args = []
13809
13810         for s in required_set_names:
13811                 required_sets[s] = InternalPackageSet(
13812                         initial_atoms=getSetAtoms(s))
13813
13814         
13815         # When removing packages, use a temporary version of world
13816         # which excludes packages that are intended to be eligible for
13817         # removal.
13818         world_temp_set = required_sets["world"]
13819         system_set = required_sets["system"]
13820
13821         if not system_set or not world_temp_set:
13822
13823                 if not system_set:
13824                         writemsg_level("!!! You have no system list.\n",
13825                                 level=logging.ERROR, noiselevel=-1)
13826
13827                 if not world_temp_set:
13828                         writemsg_level("!!! You have no world file.\n",
13829                                         level=logging.WARNING, noiselevel=-1)
13830
13831                 writemsg_level("!!! Proceeding is likely to " + \
13832                         "break your installation.\n",
13833                         level=logging.WARNING, noiselevel=-1)
13834                 if "--pretend" not in myopts:
13835                         countdown(int(settings["EMERGE_WARNING_DELAY"]), ">>> Depclean")
13836
13837         if action == "depclean":
13838                 emergelog(xterm_titles, " >>> depclean")
13839
13840         import textwrap
13841         args_set = InternalPackageSet()
13842         if myfiles:
13843                 for x in myfiles:
13844                         if not is_valid_package_atom(x):
13845                                 writemsg_level("!!! '%s' is not a valid package atom.\n" % x,
13846                                         level=logging.ERROR, noiselevel=-1)
13847                                 writemsg_level("!!! Please check ebuild(5) for full details.\n")
13848                                 return
13849                         try:
13850                                 atom = portage.dep_expand(x, mydb=vardb, settings=settings)
13851                         except portage.exception.AmbiguousPackageName, e:
13852                                 msg = "The short ebuild name \"" + x + \
13853                                         "\" is ambiguous.  Please specify " + \
13854                                         "one of the following " + \
13855                                         "fully-qualified ebuild names instead:"
13856                                 for line in textwrap.wrap(msg, 70):
13857                                         writemsg_level("!!! %s\n" % (line,),
13858                                                 level=logging.ERROR, noiselevel=-1)
13859                                 for i in e[0]:
13860                                         writemsg_level("    %s\n" % colorize("INFORM", i),
13861                                                 level=logging.ERROR, noiselevel=-1)
13862                                 writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
13863                                 return
13864                         args_set.add(atom)
13865                 matched_packages = False
13866                 for x in args_set:
13867                         if vardb.match(x):
13868                                 matched_packages = True
13869                                 break
13870                 if not matched_packages:
13871                         writemsg_level(">>> No packages selected for removal by %s\n" % \
13872                                 action)
13873                         return
13874
13875         writemsg_level("\nCalculating dependencies  ")
13876         resolver_params = create_depgraph_params(myopts, "remove")
13877         resolver = depgraph(settings, trees, myopts, resolver_params, spinner)
13878         vardb = resolver.trees[myroot]["vartree"].dbapi
13879
13880         if action == "depclean":
13881
13882                 if args_set:
13883                         # Pull in everything that's installed but not matched
13884                         # by an argument atom since we don't want to clean any
13885                         # package if something depends on it.
13886
13887                         world_temp_set.clear()
13888                         for pkg in vardb:
13889                                 spinner.update()
13890
13891                                 try:
13892                                         if args_set.findAtomForPackage(pkg) is None:
13893                                                 world_temp_set.add("=" + pkg.cpv)
13894                                                 continue
13895                                 except portage.exception.InvalidDependString, e:
13896                                         show_invalid_depstring_notice(pkg,
13897                                                 pkg.metadata["PROVIDE"], str(e))
13898                                         del e
13899                                         world_temp_set.add("=" + pkg.cpv)
13900                                         continue
13901
13902         elif action == "prune":
13903
13904                 # Pull in everything that's installed since we don't
13905                 # to prune a package if something depends on it.
13906                 world_temp_set.clear()
13907                 world_temp_set.update(vardb.cp_all())
13908
13909                 if not args_set:
13910
13911                         # Try to prune everything that's slotted.
13912                         for cp in vardb.cp_all():
13913                                 if len(vardb.cp_list(cp)) > 1:
13914                                         args_set.add(cp)
13915
13916                 # Remove atoms from world that match installed packages
13917                 # that are also matched by argument atoms, but do not remove
13918                 # them if they match the highest installed version.
13919                 for pkg in vardb:
13920                         spinner.update()
13921                         pkgs_for_cp = vardb.match_pkgs(pkg.cp)
13922                         if not pkgs_for_cp or pkg not in pkgs_for_cp:
13923                                 raise AssertionError("package expected in matches: " + \
13924                                         "cp = %s, cpv = %s matches = %s" % \
13925                                         (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
13926
13927                         highest_version = pkgs_for_cp[-1]
13928                         if pkg == highest_version:
13929                                 # pkg is the highest version
13930                                 world_temp_set.add("=" + pkg.cpv)
13931                                 continue
13932
13933                         if len(pkgs_for_cp) <= 1:
13934                                 raise AssertionError("more packages expected: " + \
13935                                         "cp = %s, cpv = %s matches = %s" % \
13936                                         (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
13937
13938                         try:
13939                                 if args_set.findAtomForPackage(pkg) is None:
13940                                         world_temp_set.add("=" + pkg.cpv)
13941                                         continue
13942                         except portage.exception.InvalidDependString, e:
13943                                 show_invalid_depstring_notice(pkg,
13944                                         pkg.metadata["PROVIDE"], str(e))
13945                                 del e
13946                                 world_temp_set.add("=" + pkg.cpv)
13947                                 continue
13948
13949         set_args = {}
13950         for s, package_set in required_sets.iteritems():
13951                 set_atom = SETPREFIX + s
13952                 set_arg = SetArg(arg=set_atom, set=package_set,
13953                         root_config=resolver.roots[myroot])
13954                 set_args[s] = set_arg
13955                 for atom in set_arg.set:
13956                         resolver._dep_stack.append(
13957                                 Dependency(atom=atom, root=myroot, parent=set_arg))
13958                         resolver.digraph.add(set_arg, None)
13959
13960         success = resolver._complete_graph()
13961         writemsg_level("\b\b... done!\n")
13962
13963         resolver.display_problems()
13964
13965         if not success:
13966                 return 1
13967
13968         def unresolved_deps():
13969
13970                 unresolvable = set()
13971                 for dep in resolver._initially_unsatisfied_deps:
13972                         if isinstance(dep.parent, Package) and \
13973                                 (dep.priority > UnmergeDepPriority.SOFT):
13974                                 unresolvable.add((dep.atom, dep.parent.cpv))
13975
13976                 if not unresolvable:
13977                         return False
13978
13979                 if unresolvable and not allow_missing_deps:
13980                         prefix = bad(" * ")
13981                         msg = []
13982                         msg.append("Dependencies could not be completely resolved due to")
13983                         msg.append("the following required packages not being installed:")
13984                         msg.append("")
13985                         for atom, parent in unresolvable:
13986                                 msg.append("  %s pulled in by:" % (atom,))
13987                                 msg.append("    %s" % (parent,))
13988                                 msg.append("")
13989                         msg.append("Have you forgotten to run " + \
13990                                 good("`emerge --update --newuse --deep @system @world`") + " prior")
13991                         msg.append(("to %s? It may be necessary to manually " + \
13992                                 "uninstall packages that no longer") % action)
13993                         msg.append("exist in the portage tree since " + \
13994                                 "it may not be possible to satisfy their")
13995                         msg.append("dependencies.  Also, be aware of " + \
13996                                 "the --with-bdeps option that is documented")
13997                         msg.append("in " + good("`man emerge`") + ".")
13998                         if action == "prune":
13999                                 msg.append("")
14000                                 msg.append("If you would like to ignore " + \
14001                                         "dependencies then use %s." % good("--nodeps"))
14002                         writemsg_level("".join("%s%s\n" % (prefix, line) for line in msg),
14003                                 level=logging.ERROR, noiselevel=-1)
14004                         return True
14005                 return False
14006
14007         if unresolved_deps():
14008                 return 1
14009
14010         graph = resolver.digraph.copy()
14011         required_pkgs_total = 0
14012         for node in graph:
14013                 if isinstance(node, Package):
14014                         required_pkgs_total += 1
14015
14016         def show_parents(child_node):
14017                 parent_nodes = graph.parent_nodes(child_node)
14018                 if not parent_nodes:
14019                         # With --prune, the highest version can be pulled in without any
14020                         # real parent since all installed packages are pulled in.  In that
14021                         # case there's nothing to show here.
14022                         return
14023                 parent_strs = []
14024                 for node in parent_nodes:
14025                         parent_strs.append(str(getattr(node, "cpv", node)))
14026                 parent_strs.sort()
14027                 msg = []
14028                 msg.append("  %s pulled in by:\n" % (child_node.cpv,))
14029                 for parent_str in parent_strs:
14030                         msg.append("    %s\n" % (parent_str,))
14031                 msg.append("\n")
14032                 portage.writemsg_stdout("".join(msg), noiselevel=-1)
14033
14034         def cmp_pkg_cpv(pkg1, pkg2):
14035                 """Sort Package instances by cpv."""
14036                 if pkg1.cpv > pkg2.cpv:
14037                         return 1
14038                 elif pkg1.cpv == pkg2.cpv:
14039                         return 0
14040                 else:
14041                         return -1
14042
14043         def create_cleanlist():
14044                 pkgs_to_remove = []
14045
14046                 if action == "depclean":
14047                         if args_set:
14048
14049                                 for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)):
14050                                         arg_atom = None
14051                                         try:
14052                                                 arg_atom = args_set.findAtomForPackage(pkg)
14053                                         except portage.exception.InvalidDependString:
14054                                                 # this error has already been displayed by now
14055                                                 continue
14056
14057                                         if arg_atom:
14058                                                 if pkg not in graph:
14059                                                         pkgs_to_remove.append(pkg)
14060                                                 elif "--verbose" in myopts:
14061                                                         show_parents(pkg)
14062
14063                         else:
14064                                 for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)):
14065                                         if pkg not in graph:
14066                                                 pkgs_to_remove.append(pkg)
14067                                         elif "--verbose" in myopts:
14068                                                 show_parents(pkg)
14069
14070                 elif action == "prune":
14071                         # Prune really uses all installed instead of world. It's not
14072                         # a real reverse dependency so don't display it as such.
14073                         graph.remove(set_args["world"])
14074
14075                         for atom in args_set:
14076                                 for pkg in vardb.match_pkgs(atom):
14077                                         if pkg not in graph:
14078                                                 pkgs_to_remove.append(pkg)
14079                                         elif "--verbose" in myopts:
14080                                                 show_parents(pkg)
14081
14082                 if not pkgs_to_remove:
14083                         writemsg_level(
14084                                 ">>> No packages selected for removal by %s\n" % action)
14085                         if "--verbose" not in myopts:
14086                                 writemsg_level(
14087                                         ">>> To see reverse dependencies, use %s\n" % \
14088                                                 good("--verbose"))
14089                         if action == "prune":
14090                                 writemsg_level(
14091                                         ">>> To ignore dependencies, use %s\n" % \
14092                                                 good("--nodeps"))
14093
14094                 return pkgs_to_remove
14095
14096         cleanlist = create_cleanlist()
14097
14098         if len(cleanlist):
14099                 clean_set = set(cleanlist)
14100
14101                 # Check if any of these package are the sole providers of libraries
14102                 # with consumers that have not been selected for removal. If so, these
14103                 # packages and any dependencies need to be added to the graph.
14104                 real_vardb = trees[myroot]["vartree"].dbapi
14105                 linkmap = real_vardb.linkmap
14106                 liblist = linkmap.listLibraryObjects()
14107                 consumer_cache = {}
14108                 provider_cache = {}
14109                 soname_cache = {}
14110                 consumer_map = {}
14111
14112                 writemsg_level(">>> Checking for lib consumers...\n")
14113
14114                 for pkg in cleanlist:
14115                         pkg_dblink = real_vardb._dblink(pkg.cpv)
14116                         provided_libs = set()
14117
14118                         for lib in liblist:
14119                                 if pkg_dblink.isowner(lib, myroot):
14120                                         provided_libs.add(lib)
14121
14122                         if not provided_libs:
14123                                 continue
14124
14125                         consumers = {}
14126                         for lib in provided_libs:
14127                                 lib_consumers = consumer_cache.get(lib)
14128                                 if lib_consumers is None:
14129                                         lib_consumers = linkmap.findConsumers(lib)
14130                                         consumer_cache[lib] = lib_consumers
14131                                 if lib_consumers:
14132                                         consumers[lib] = lib_consumers
14133
14134                         if not consumers:
14135                                 continue
14136
14137                         for lib, lib_consumers in consumers.items():
14138                                 for consumer_file in list(lib_consumers):
14139                                         if pkg_dblink.isowner(consumer_file, myroot):
14140                                                 lib_consumers.remove(consumer_file)
14141                                 if not lib_consumers:
14142                                         del consumers[lib]
14143
14144                         if not consumers:
14145                                 continue
14146
14147                         for lib, lib_consumers in consumers.iteritems():
14148
14149                                 soname = soname_cache.get(lib)
14150                                 if soname is None:
14151                                         soname = linkmap.getSoname(lib)
14152                                         soname_cache[lib] = soname
14153
14154                                 consumer_providers = []
14155                                 for lib_consumer in lib_consumers:
14156                                         providers = provider_cache.get(lib)
14157                                         if providers is None:
14158                                                 providers = linkmap.findProviders(lib_consumer)
14159                                                 provider_cache[lib_consumer] = providers
14160                                         if soname not in providers:
14161                                                 # Why does this happen?
14162                                                 continue
14163                                         consumer_providers.append(
14164                                                 (lib_consumer, providers[soname]))
14165
14166                                 consumers[lib] = consumer_providers
14167
14168                         consumer_map[pkg] = consumers
14169
14170                 if consumer_map:
14171
14172                         search_files = set()
14173                         for consumers in consumer_map.itervalues():
14174                                 for lib, consumer_providers in consumers.iteritems():
14175                                         for lib_consumer, providers in consumer_providers:
14176                                                 search_files.add(lib_consumer)
14177                                                 search_files.update(providers)
14178
14179                         writemsg_level(">>> Assigning files to packages...\n")
14180                         file_owners = real_vardb._owners.getFileOwnerMap(search_files)
14181
14182                         for pkg, consumers in consumer_map.items():
14183                                 for lib, consumer_providers in consumers.items():
14184                                         lib_consumers = set()
14185
14186                                         for lib_consumer, providers in consumer_providers:
14187                                                 owner_set = file_owners.get(lib_consumer)
14188                                                 provider_dblinks = set()
14189                                                 provider_pkgs = set()
14190
14191                                                 if len(providers) > 1:
14192                                                         for provider in providers:
14193                                                                 provider_set = file_owners.get(provider)
14194                                                                 if provider_set is not None:
14195                                                                         provider_dblinks.update(provider_set)
14196
14197                                                 if len(provider_dblinks) > 1:
14198                                                         for provider_dblink in provider_dblinks:
14199                                                                 pkg_key = ("installed", myroot,
14200                                                                         provider_dblink.mycpv, "nomerge")
14201                                                                 if pkg_key not in clean_set:
14202                                                                         provider_pkgs.add(vardb.get(pkg_key))
14203
14204                                                 if provider_pkgs:
14205                                                         continue
14206
14207                                                 if owner_set is not None:
14208                                                         lib_consumers.update(owner_set)
14209
14210                                         for consumer_dblink in list(lib_consumers):
14211                                                 if ("installed", myroot, consumer_dblink.mycpv,
14212                                                         "nomerge") in clean_set:
14213                                                         lib_consumers.remove(consumer_dblink)
14214                                                         continue
14215
14216                                         if lib_consumers:
14217                                                 consumers[lib] = lib_consumers
14218                                         else:
14219                                                 del consumers[lib]
14220                                 if not consumers:
14221                                         del consumer_map[pkg]
14222
14223                 if consumer_map:
14224                         # TODO: Implement a package set for rebuilding consumer packages.
14225
14226                         msg = "In order to avoid breakage of link level " + \
14227                                 "dependencies, one or more packages will not be removed. " + \
14228                                 "This can be solved by rebuilding " + \
14229                                 "the packages that pulled them in."
14230
14231                         prefix = bad(" * ")
14232                         from textwrap import wrap
14233                         writemsg_level("".join(prefix + "%s\n" % line for \
14234                                 line in wrap(msg, 70)), level=logging.WARNING, noiselevel=-1)
14235
14236                         msg = []
14237                         for pkg, consumers in consumer_map.iteritems():
14238                                 unique_consumers = set(chain(*consumers.values()))
14239                                 unique_consumers = sorted(consumer.mycpv \
14240                                         for consumer in unique_consumers)
14241                                 msg.append("")
14242                                 msg.append("  %s pulled in by:" % (pkg.cpv,))
14243                                 for consumer in unique_consumers:
14244                                         msg.append("    %s" % (consumer,))
14245                         msg.append("")
14246                         writemsg_level("".join(prefix + "%s\n" % line for line in msg),
14247                                 level=logging.WARNING, noiselevel=-1)
14248
14249                         # Add lib providers to the graph as children of lib consumers,
14250                         # and also add any dependencies pulled in by the provider.
14251                         writemsg_level(">>> Adding lib providers to graph...\n")
14252
14253                         for pkg, consumers in consumer_map.iteritems():
14254                                 for consumer_dblink in set(chain(*consumers.values())):
14255                                         consumer_pkg = vardb.get(("installed", myroot,
14256                                                 consumer_dblink.mycpv, "nomerge"))
14257                                         if not resolver._add_pkg(pkg,
14258                                                 Dependency(parent=consumer_pkg,
14259                                                 priority=UnmergeDepPriority(runtime=True),
14260                                                 root=pkg.root)):
14261                                                 resolver.display_problems()
14262                                                 return 1
14263
14264                         writemsg_level("\nCalculating dependencies  ")
14265                         success = resolver._complete_graph()
14266                         writemsg_level("\b\b... done!\n")
14267                         resolver.display_problems()
14268                         if not success:
14269                                 return 1
14270                         if unresolved_deps():
14271                                 return 1
14272
14273                         graph = resolver.digraph.copy()
14274                         required_pkgs_total = 0
14275                         for node in graph:
14276                                 if isinstance(node, Package):
14277                                         required_pkgs_total += 1
14278                         cleanlist = create_cleanlist()
14279                         if not cleanlist:
14280                                 return 0
14281                         clean_set = set(cleanlist)
14282
14283                 # Use a topological sort to create an unmerge order such that
14284                 # each package is unmerged before it's dependencies. This is
14285                 # necessary to avoid breaking things that may need to run
14286                 # during pkg_prerm or pkg_postrm phases.
14287
14288                 # Create a new graph to account for dependencies between the
14289                 # packages being unmerged.
14290                 graph = digraph()
14291                 del cleanlist[:]
14292
14293                 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
14294                 runtime = UnmergeDepPriority(runtime=True)
14295                 runtime_post = UnmergeDepPriority(runtime_post=True)
14296                 buildtime = UnmergeDepPriority(buildtime=True)
14297                 priority_map = {
14298                         "RDEPEND": runtime,
14299                         "PDEPEND": runtime_post,
14300                         "DEPEND": buildtime,
14301                 }
14302
14303                 for node in clean_set:
14304                         graph.add(node, None)
14305                         mydeps = []
14306                         node_use = node.metadata["USE"].split()
14307                         for dep_type in dep_keys:
14308                                 depstr = node.metadata[dep_type]
14309                                 if not depstr:
14310                                         continue
14311                                 try:
14312                                         portage.dep._dep_check_strict = False
14313                                         success, atoms = portage.dep_check(depstr, None, settings,
14314                                                 myuse=node_use, trees=resolver._graph_trees,
14315                                                 myroot=myroot)
14316                                 finally:
14317                                         portage.dep._dep_check_strict = True
14318                                 if not success:
14319                                         # Ignore invalid deps of packages that will
14320                                         # be uninstalled anyway.
14321                                         continue
14322
14323                                 priority = priority_map[dep_type]
14324                                 for atom in atoms:
14325                                         if not isinstance(atom, portage.dep.Atom):
14326                                                 # Ignore invalid atoms returned from dep_check().
14327                                                 continue
14328                                         if atom.blocker:
14329                                                 continue
14330                                         matches = vardb.match_pkgs(atom)
14331                                         if not matches:
14332                                                 continue
14333                                         for child_node in matches:
14334                                                 if child_node in clean_set:
14335                                                         graph.add(child_node, node, priority=priority)
14336
14337                 ordered = True
14338                 if len(graph.order) == len(graph.root_nodes()):
14339                         # If there are no dependencies between packages
14340                         # let unmerge() group them by cat/pn.
14341                         ordered = False
14342                         cleanlist = [pkg.cpv for pkg in graph.order]
14343                 else:
14344                         # Order nodes from lowest to highest overall reference count for
14345                         # optimal root node selection.
14346                         node_refcounts = {}
14347                         for node in graph.order:
14348                                 node_refcounts[node] = len(graph.parent_nodes(node))
14349                         def cmp_reference_count(node1, node2):
14350                                 return node_refcounts[node1] - node_refcounts[node2]
14351                         graph.order.sort(key=cmp_sort_key(cmp_reference_count))
14352         
14353                         ignore_priority_range = [None]
14354                         ignore_priority_range.extend(
14355                                 xrange(UnmergeDepPriority.MIN, UnmergeDepPriority.MAX + 1))
14356                         while not graph.empty():
14357                                 for ignore_priority in ignore_priority_range:
14358                                         nodes = graph.root_nodes(ignore_priority=ignore_priority)
14359                                         if nodes:
14360                                                 break
14361                                 if not nodes:
14362                                         raise AssertionError("no root nodes")
14363                                 if ignore_priority is not None:
14364                                         # Some deps have been dropped due to circular dependencies,
14365                                         # so only pop one node in order do minimize the number that
14366                                         # are dropped.
14367                                         del nodes[1:]
14368                                 for node in nodes:
14369                                         graph.remove(node)
14370                                         cleanlist.append(node.cpv)
14371
14372                 unmerge(root_config, myopts, "unmerge", cleanlist,
14373                         ldpath_mtimes, ordered=ordered)
14374
14375         if action == "prune":
14376                 return
14377
14378         if not cleanlist and "--quiet" in myopts:
14379                 return
14380
14381         print "Packages installed:   "+str(len(vardb.cpv_all()))
14382         print "Packages in world:    " + \
14383                 str(len(root_config.sets["world"].getAtoms()))
14384         print "Packages in system:   " + \
14385                 str(len(root_config.sets["system"].getAtoms()))
14386         print "Required packages:    "+str(required_pkgs_total)
14387         if "--pretend" in myopts:
14388                 print "Number to remove:     "+str(len(cleanlist))
14389         else:
14390                 print "Number removed:       "+str(len(cleanlist))
14391
14392 def resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
14393         """
14394         Construct a depgraph for the given resume list. This will raise
14395         PackageNotFound or depgraph.UnsatisfiedResumeDep when necessary.
14396         @rtype: tuple
14397         @returns: (success, depgraph, dropped_tasks)
14398         """
14399         skip_masked = True
14400         skip_unsatisfied = True
14401         mergelist = mtimedb["resume"]["mergelist"]
14402         dropped_tasks = set()
14403         while True:
14404                 mydepgraph = depgraph(settings, trees,
14405                         myopts, myparams, spinner)
14406                 try:
14407                         success = mydepgraph.loadResumeCommand(mtimedb["resume"],
14408                                 skip_masked=skip_masked)
14409                 except depgraph.UnsatisfiedResumeDep, e:
14410                         if not skip_unsatisfied:
14411                                 raise
14412
14413                         graph = mydepgraph.digraph
14414                         unsatisfied_parents = dict((dep.parent, dep.parent) \
14415                                 for dep in e.value)
14416                         traversed_nodes = set()
14417                         unsatisfied_stack = list(unsatisfied_parents)
14418                         while unsatisfied_stack:
14419                                 pkg = unsatisfied_stack.pop()
14420                                 if pkg in traversed_nodes:
14421                                         continue
14422                                 traversed_nodes.add(pkg)
14423
14424                                 # If this package was pulled in by a parent
14425                                 # package scheduled for merge, removing this
14426                                 # package may cause the the parent package's
14427                                 # dependency to become unsatisfied.
14428                                 for parent_node in graph.parent_nodes(pkg):
14429                                         if not isinstance(parent_node, Package) \
14430                                                 or parent_node.operation not in ("merge", "nomerge"):
14431                                                 continue
14432                                         unsatisfied = \
14433                                                 graph.child_nodes(parent_node,
14434                                                 ignore_priority=DepPrioritySatisfiedRange.ignore_soft)
14435                                         if pkg in unsatisfied:
14436                                                 unsatisfied_parents[parent_node] = parent_node
14437                                                 unsatisfied_stack.append(parent_node)
14438
14439                         pruned_mergelist = []
14440                         for x in mergelist:
14441                                 if isinstance(x, list) and \
14442                                         tuple(x) not in unsatisfied_parents:
14443                                         pruned_mergelist.append(x)
14444
14445                         # If the mergelist doesn't shrink then this loop is infinite.
14446                         if len(pruned_mergelist) == len(mergelist):
14447                                 # This happens if a package can't be dropped because
14448                                 # it's already installed, but it has unsatisfied PDEPEND.
14449                                 raise
14450                         mergelist[:] = pruned_mergelist
14451
14452                         # Exclude installed packages that have been removed from the graph due
14453                         # to failure to build/install runtime dependencies after the dependent
14454                         # package has already been installed.
14455                         dropped_tasks.update(pkg for pkg in \
14456                                 unsatisfied_parents if pkg.operation != "nomerge")
14457                         mydepgraph.break_refs(unsatisfied_parents)
14458
14459                         del e, graph, traversed_nodes, \
14460                                 unsatisfied_parents, unsatisfied_stack
14461                         continue
14462                 else:
14463                         break
14464         return (success, mydepgraph, dropped_tasks)
14465
14466 def action_build(settings, trees, mtimedb,
14467         myopts, myaction, myfiles, spinner):
14468
14469         # validate the state of the resume data
14470         # so that we can make assumptions later.
14471         for k in ("resume", "resume_backup"):
14472                 if k not in mtimedb:
14473                         continue
14474                 resume_data = mtimedb[k]
14475                 if not isinstance(resume_data, dict):
14476                         del mtimedb[k]
14477                         continue
14478                 mergelist = resume_data.get("mergelist")
14479                 if not isinstance(mergelist, list):
14480                         del mtimedb[k]
14481                         continue
14482                 for x in mergelist:
14483                         if not (isinstance(x, list) and len(x) == 4):
14484                                 continue
14485                         pkg_type, pkg_root, pkg_key, pkg_action = x
14486                         if pkg_root not in trees:
14487                                 # Current $ROOT setting differs,
14488                                 # so the list must be stale.
14489                                 mergelist = None
14490                                 break
14491                 if not mergelist:
14492                         del mtimedb[k]
14493                         continue
14494                 resume_opts = resume_data.get("myopts")
14495                 if not isinstance(resume_opts, (dict, list)):
14496                         del mtimedb[k]
14497                         continue
14498                 favorites = resume_data.get("favorites")
14499                 if not isinstance(favorites, list):
14500                         del mtimedb[k]
14501                         continue
14502
14503         resume = False
14504         if "--resume" in myopts and \
14505                 ("resume" in mtimedb or
14506                 "resume_backup" in mtimedb):
14507                 resume = True
14508                 if "resume" not in mtimedb:
14509                         mtimedb["resume"] = mtimedb["resume_backup"]
14510                         del mtimedb["resume_backup"]
14511                         mtimedb.commit()
14512                 # "myopts" is a list for backward compatibility.
14513                 resume_opts = mtimedb["resume"].get("myopts", [])
14514                 if isinstance(resume_opts, list):
14515                         resume_opts = dict((k,True) for k in resume_opts)
14516                 for opt in ("--ask", "--color", "--skipfirst", "--tree"):
14517                         resume_opts.pop(opt, None)
14518
14519                 # Current options always override resume_opts.
14520                 resume_opts.update(myopts)
14521                 myopts.clear()
14522                 myopts.update(resume_opts)
14523
14524                 if "--debug" in myopts:
14525                         writemsg_level("myopts %s\n" % (myopts,))
14526
14527                 # Adjust config according to options of the command being resumed.
14528                 for myroot in trees:
14529                         mysettings =  trees[myroot]["vartree"].settings
14530                         mysettings.unlock()
14531                         adjust_config(myopts, mysettings)
14532                         mysettings.lock()
14533                         del myroot, mysettings
14534
14535         ldpath_mtimes = mtimedb["ldpath"]
14536         favorites=[]
14537         merge_count = 0
14538         buildpkgonly = "--buildpkgonly" in myopts
14539         pretend = "--pretend" in myopts
14540         fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
14541         ask = "--ask" in myopts
14542         nodeps = "--nodeps" in myopts
14543         oneshot = "--oneshot" in myopts or "--onlydeps" in myopts
14544         tree = "--tree" in myopts
14545         if nodeps and tree:
14546                 tree = False
14547                 del myopts["--tree"]
14548                 portage.writemsg(colorize("WARN", " * ") + \
14549                         "--tree is broken with --nodeps. Disabling...\n")
14550         debug = "--debug" in myopts
14551         verbose = "--verbose" in myopts
14552         quiet = "--quiet" in myopts
14553         if pretend or fetchonly:
14554                 # make the mtimedb readonly
14555                 mtimedb.filename = None
14556         if '--digest' in myopts or 'digest' in settings.features:
14557                 if '--digest' in myopts:
14558                         msg = "The --digest option"
14559                 else:
14560                         msg = "The FEATURES=digest setting"
14561
14562                 msg += " can prevent corruption from being" + \
14563                         " noticed. The `repoman manifest` command is the preferred" + \
14564                         " way to generate manifests and it is capable of doing an" + \
14565                         " entire repository or category at once."
14566                 prefix = bad(" * ")
14567                 writemsg(prefix + "\n")
14568                 from textwrap import wrap
14569                 for line in wrap(msg, 72):
14570                         writemsg("%s%s\n" % (prefix, line))
14571                 writemsg(prefix + "\n")
14572
14573         if "--quiet" not in myopts and \
14574                 ("--pretend" in myopts or "--ask" in myopts or \
14575                 "--tree" in myopts or "--verbose" in myopts):
14576                 action = ""
14577                 if "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
14578                         action = "fetched"
14579                 elif "--buildpkgonly" in myopts:
14580                         action = "built"
14581                 else:
14582                         action = "merged"
14583                 if "--tree" in myopts and action != "fetched": # Tree doesn't work with fetching
14584                         print
14585                         print darkgreen("These are the packages that would be %s, in reverse order:") % action
14586                         print
14587                 else:
14588                         print
14589                         print darkgreen("These are the packages that would be %s, in order:") % action
14590                         print
14591
14592         show_spinner = "--quiet" not in myopts and "--nodeps" not in myopts
14593         if not show_spinner:
14594                 spinner.update = spinner.update_quiet
14595
14596         if resume:
14597                 favorites = mtimedb["resume"].get("favorites")
14598                 if not isinstance(favorites, list):
14599                         favorites = []
14600
14601                 if show_spinner:
14602                         print "Calculating dependencies  ",
14603                 myparams = create_depgraph_params(myopts, myaction)
14604
14605                 resume_data = mtimedb["resume"]
14606                 mergelist = resume_data["mergelist"]
14607                 if mergelist and "--skipfirst" in myopts:
14608                         for i, task in enumerate(mergelist):
14609                                 if isinstance(task, list) and \
14610                                         task and task[-1] == "merge":
14611                                         del mergelist[i]
14612                                         break
14613
14614                 success = False
14615                 mydepgraph = None
14616                 try:
14617                         success, mydepgraph, dropped_tasks = resume_depgraph(
14618                                 settings, trees, mtimedb, myopts, myparams, spinner)
14619                 except (portage.exception.PackageNotFound,
14620                         depgraph.UnsatisfiedResumeDep), e:
14621                         if isinstance(e, depgraph.UnsatisfiedResumeDep):
14622                                 mydepgraph = e.depgraph
14623                         if show_spinner:
14624                                 print
14625                         from textwrap import wrap
14626                         from portage.output import EOutput
14627                         out = EOutput()
14628
14629                         resume_data = mtimedb["resume"]
14630                         mergelist = resume_data.get("mergelist")
14631                         if not isinstance(mergelist, list):
14632                                 mergelist = []
14633                         if mergelist and debug or (verbose and not quiet):
14634                                 out.eerror("Invalid resume list:")
14635                                 out.eerror("")
14636                                 indent = "  "
14637                                 for task in mergelist:
14638                                         if isinstance(task, list):
14639                                                 out.eerror(indent + str(tuple(task)))
14640                                 out.eerror("")
14641
14642                         if isinstance(e, depgraph.UnsatisfiedResumeDep):
14643                                 out.eerror("One or more packages are either masked or " + \
14644                                         "have missing dependencies:")
14645                                 out.eerror("")
14646                                 indent = "  "
14647                                 for dep in e.value:
14648                                         if dep.atom is None:
14649                                                 out.eerror(indent + "Masked package:")
14650                                                 out.eerror(2 * indent + str(dep.parent))
14651                                                 out.eerror("")
14652                                         else:
14653                                                 out.eerror(indent + str(dep.atom) + " pulled in by:")
14654                                                 out.eerror(2 * indent + str(dep.parent))
14655                                                 out.eerror("")
14656                                 msg = "The resume list contains packages " + \
14657                                         "that are either masked or have " + \
14658                                         "unsatisfied dependencies. " + \
14659                                         "Please restart/continue " + \
14660                                         "the operation manually, or use --skipfirst " + \
14661                                         "to skip the first package in the list and " + \
14662                                         "any other packages that may be " + \
14663                                         "masked or have missing dependencies."
14664                                 for line in wrap(msg, 72):
14665                                         out.eerror(line)
14666                         elif isinstance(e, portage.exception.PackageNotFound):
14667                                 out.eerror("An expected package is " + \
14668                                         "not available: %s" % str(e))
14669                                 out.eerror("")
14670                                 msg = "The resume list contains one or more " + \
14671                                         "packages that are no longer " + \
14672                                         "available. Please restart/continue " + \
14673                                         "the operation manually."
14674                                 for line in wrap(msg, 72):
14675                                         out.eerror(line)
14676                 else:
14677                         if show_spinner:
14678                                 print "\b\b... done!"
14679
14680                 if success:
14681                         if dropped_tasks:
14682                                 portage.writemsg("!!! One or more packages have been " + \
14683                                         "dropped due to\n" + \
14684                                         "!!! masking or unsatisfied dependencies:\n\n",
14685                                         noiselevel=-1)
14686                                 for task in dropped_tasks:
14687                                         portage.writemsg("  " + str(task) + "\n", noiselevel=-1)
14688                                 portage.writemsg("\n", noiselevel=-1)
14689                         del dropped_tasks
14690                 else:
14691                         if mydepgraph is not None:
14692                                 mydepgraph.display_problems()
14693                         if not (ask or pretend):
14694                                 # delete the current list and also the backup
14695                                 # since it's probably stale too.
14696                                 for k in ("resume", "resume_backup"):
14697                                         mtimedb.pop(k, None)
14698                                 mtimedb.commit()
14699
14700                         return 1
14701         else:
14702                 if ("--resume" in myopts):
14703                         print darkgreen("emerge: It seems we have nothing to resume...")
14704                         return os.EX_OK
14705
14706                 myparams = create_depgraph_params(myopts, myaction)
14707                 if "--quiet" not in myopts and "--nodeps" not in myopts:
14708                         print "Calculating dependencies  ",
14709                         sys.stdout.flush()
14710                 mydepgraph = depgraph(settings, trees, myopts, myparams, spinner)
14711                 try:
14712                         retval, favorites = mydepgraph.select_files(myfiles)
14713                 except portage.exception.PackageNotFound, e:
14714                         portage.writemsg("\n!!! %s\n" % str(e), noiselevel=-1)
14715                         return 1
14716                 except portage.exception.PackageSetNotFound, e:
14717                         root_config = trees[settings["ROOT"]]["root_config"]
14718                         display_missing_pkg_set(root_config, e.value)
14719                         return 1
14720                 if show_spinner:
14721                         print "\b\b... done!"
14722                 if not retval:
14723                         mydepgraph.display_problems()
14724                         return 1
14725
14726         if "--pretend" not in myopts and \
14727                 ("--ask" in myopts or "--tree" in myopts or \
14728                 "--verbose" in myopts) and \
14729                 not ("--quiet" in myopts and "--ask" not in myopts):
14730                 if "--resume" in myopts:
14731                         mymergelist = mydepgraph.altlist()
14732                         if len(mymergelist) == 0:
14733                                 print colorize("INFORM", "emerge: It seems we have nothing to resume...")
14734                                 return os.EX_OK
14735                         favorites = mtimedb["resume"]["favorites"]
14736                         retval = mydepgraph.display(
14737                                 mydepgraph.altlist(reversed=tree),
14738                                 favorites=favorites)
14739                         mydepgraph.display_problems()
14740                         if retval != os.EX_OK:
14741                                 return retval
14742                         prompt="Would you like to resume merging these packages?"
14743                 else:
14744                         retval = mydepgraph.display(
14745                                 mydepgraph.altlist(reversed=("--tree" in myopts)),
14746                                 favorites=favorites)
14747                         mydepgraph.display_problems()
14748                         if retval != os.EX_OK:
14749                                 return retval
14750                         mergecount=0
14751                         for x in mydepgraph.altlist():
14752                                 if isinstance(x, Package) and x.operation == "merge":
14753                                         mergecount += 1
14754
14755                         if mergecount==0:
14756                                 sets = trees[settings["ROOT"]]["root_config"].sets
14757                                 world_candidates = None
14758                                 if "--noreplace" in myopts and \
14759                                         not oneshot and favorites:
14760                                         # Sets that are not world candidates are filtered
14761                                         # out here since the favorites list needs to be
14762                                         # complete for depgraph.loadResumeCommand() to
14763                                         # operate correctly.
14764                                         world_candidates = [x for x in favorites \
14765                                                 if not (x.startswith(SETPREFIX) and \
14766                                                 not sets[x[1:]].world_candidate)]
14767                                 if "--noreplace" in myopts and \
14768                                         not oneshot and world_candidates:
14769                                         print
14770                                         for x in world_candidates:
14771                                                 print " %s %s" % (good("*"), x)
14772                                         prompt="Would you like to add these packages to your world favorites?"
14773                                 elif settings["AUTOCLEAN"] and "yes"==settings["AUTOCLEAN"]:
14774                                         prompt="Nothing to merge; would you like to auto-clean packages?"
14775                                 else:
14776                                         print
14777                                         print "Nothing to merge; quitting."
14778                                         print
14779                                         return os.EX_OK
14780                         elif "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
14781                                 prompt="Would you like to fetch the source files for these packages?"
14782                         else:
14783                                 prompt="Would you like to merge these packages?"
14784                 print
14785                 if "--ask" in myopts and userquery(prompt) == "No":
14786                         print
14787                         print "Quitting."
14788                         print
14789                         return os.EX_OK
14790                 # Don't ask again (e.g. when auto-cleaning packages after merge)
14791                 myopts.pop("--ask", None)
14792
14793         if ("--pretend" in myopts) and not ("--fetchonly" in myopts or "--fetch-all-uri" in myopts):
14794                 if ("--resume" in myopts):
14795                         mymergelist = mydepgraph.altlist()
14796                         if len(mymergelist) == 0:
14797                                 print colorize("INFORM", "emerge: It seems we have nothing to resume...")
14798                                 return os.EX_OK
14799                         favorites = mtimedb["resume"]["favorites"]
14800                         retval = mydepgraph.display(
14801                                 mydepgraph.altlist(reversed=tree),
14802                                 favorites=favorites)
14803                         mydepgraph.display_problems()
14804                         if retval != os.EX_OK:
14805                                 return retval
14806                 else:
14807                         retval = mydepgraph.display(
14808                                 mydepgraph.altlist(reversed=("--tree" in myopts)),
14809                                 favorites=favorites)
14810                         mydepgraph.display_problems()
14811                         if retval != os.EX_OK:
14812                                 return retval
14813                         if "--buildpkgonly" in myopts:
14814                                 graph_copy = mydepgraph.digraph.clone()
14815                                 removed_nodes = set()
14816                                 for node in graph_copy:
14817                                         if not isinstance(node, Package) or \
14818                                                 node.operation == "nomerge":
14819                                                 removed_nodes.add(node)
14820                                 graph_copy.difference_update(removed_nodes)
14821                                 if not graph_copy.hasallzeros(ignore_priority = \
14822                                         DepPrioritySatisfiedRange.ignore_medium):
14823                                         print "\n!!! --buildpkgonly requires all dependencies to be merged."
14824                                         print "!!! You have to merge the dependencies before you can build this package.\n"
14825                                         return 1
14826         else:
14827                 if "--buildpkgonly" in myopts:
14828                         graph_copy = mydepgraph.digraph.clone()
14829                         removed_nodes = set()
14830                         for node in graph_copy:
14831                                 if not isinstance(node, Package) or \
14832                                         node.operation == "nomerge":
14833                                         removed_nodes.add(node)
14834                         graph_copy.difference_update(removed_nodes)
14835                         if not graph_copy.hasallzeros(ignore_priority = \
14836                                 DepPrioritySatisfiedRange.ignore_medium):
14837                                 print "\n!!! --buildpkgonly requires all dependencies to be merged."
14838                                 print "!!! Cannot merge requested packages. Merge deps and try again.\n"
14839                                 return 1
14840
14841                 if ("--resume" in myopts):
14842                         favorites=mtimedb["resume"]["favorites"]
14843                         mymergelist = mydepgraph.altlist()
14844                         mydepgraph.break_refs(mymergelist)
14845                         mergetask = Scheduler(settings, trees, mtimedb, myopts,
14846                                 spinner, mymergelist, favorites, mydepgraph.schedulerGraph())
14847                         del mydepgraph, mymergelist
14848                         clear_caches(trees)
14849
14850                         retval = mergetask.merge()
14851                         merge_count = mergetask.curval
14852                 else:
14853                         if "resume" in mtimedb and \
14854                         "mergelist" in mtimedb["resume"] and \
14855                         len(mtimedb["resume"]["mergelist"]) > 1:
14856                                 mtimedb["resume_backup"] = mtimedb["resume"]
14857                                 del mtimedb["resume"]
14858                                 mtimedb.commit()
14859                         mtimedb["resume"]={}
14860                         # Stored as a dict starting with portage-2.1.6_rc1, and supported
14861                         # by >=portage-2.1.3_rc8. Versions <portage-2.1.3_rc8 only support
14862                         # a list type for options.
14863                         mtimedb["resume"]["myopts"] = myopts.copy()
14864
14865                         # Convert Atom instances to plain str.
14866                         mtimedb["resume"]["favorites"] = [str(x) for x in favorites]
14867
14868                         pkglist = mydepgraph.altlist()
14869                         mydepgraph.saveNomergeFavorites()
14870                         mydepgraph.break_refs(pkglist)
14871                         mergetask = Scheduler(settings, trees, mtimedb, myopts,
14872                                 spinner, pkglist, favorites, mydepgraph.schedulerGraph())
14873                         del mydepgraph, pkglist
14874                         clear_caches(trees)
14875
14876                         retval = mergetask.merge()
14877                         merge_count = mergetask.curval
14878
14879                 if retval == os.EX_OK and not (buildpkgonly or fetchonly or pretend):
14880                         if "yes" == settings.get("AUTOCLEAN"):
14881                                 portage.writemsg_stdout(">>> Auto-cleaning packages...\n")
14882                                 unmerge(trees[settings["ROOT"]]["root_config"],
14883                                         myopts, "clean", [],
14884                                         ldpath_mtimes, autoclean=1)
14885                         else:
14886                                 portage.writemsg_stdout(colorize("WARN", "WARNING:")
14887                                         + " AUTOCLEAN is disabled.  This can cause serious"
14888                                         + " problems due to overlapping packages.\n")
14889                         trees[settings["ROOT"]]["vartree"].dbapi.plib_registry.pruneNonExisting()
14890
14891                 return retval
14892
14893 def multiple_actions(action1, action2):
14894         sys.stderr.write("\n!!! Multiple actions requested... Please choose one only.\n")
14895         sys.stderr.write("!!! '%s' or '%s'\n\n" % (action1, action2))
14896         sys.exit(1)
14897
14898 def insert_optional_args(args):
14899         """
14900         Parse optional arguments and insert a value if one has
14901         not been provided. This is done before feeding the args
14902         to the optparse parser since that parser does not support
14903         this feature natively.
14904         """
14905
14906         new_args = []
14907         jobs_opts = ("-j", "--jobs")
14908         root_deps_opt = '--root-deps'
14909         root_deps_choices = ('True', 'rdeps')
14910         arg_stack = args[:]
14911         arg_stack.reverse()
14912         while arg_stack:
14913                 arg = arg_stack.pop()
14914
14915                 if arg == root_deps_opt:
14916                         new_args.append(arg)
14917                         if arg_stack and arg_stack[-1] in root_deps_choices:
14918                                 new_args.append(arg_stack.pop())
14919                         else:
14920                                 # insert default argument
14921                                 new_args.append('True')
14922                         continue
14923
14924                 short_job_opt = bool("j" in arg and arg[:1] == "-" and arg[:2] != "--")
14925                 if not (short_job_opt or arg in jobs_opts):
14926                         new_args.append(arg)
14927                         continue
14928
14929                 # Insert an empty placeholder in order to
14930                 # satisfy the requirements of optparse.
14931
14932                 new_args.append("--jobs")
14933                 job_count = None
14934                 saved_opts = None
14935                 if short_job_opt and len(arg) > 2:
14936                         if arg[:2] == "-j":
14937                                 try:
14938                                         job_count = int(arg[2:])
14939                                 except ValueError:
14940                                         saved_opts = arg[2:]
14941                         else:
14942                                 job_count = "True"
14943                                 saved_opts = arg[1:].replace("j", "")
14944
14945                 if job_count is None and arg_stack:
14946                         try:
14947                                 job_count = int(arg_stack[-1])
14948                         except ValueError:
14949                                 pass
14950                         else:
14951                                 # Discard the job count from the stack
14952                                 # since we're consuming it here.
14953                                 arg_stack.pop()
14954
14955                 if job_count is None:
14956                         # unlimited number of jobs
14957                         new_args.append("True")
14958                 else:
14959                         new_args.append(str(job_count))
14960
14961                 if saved_opts is not None:
14962                         new_args.append("-" + saved_opts)
14963
14964         return new_args
14965
14966 def parse_opts(tmpcmdline, silent=False):
14967         myaction=None
14968         myopts = {}
14969         myfiles=[]
14970
14971         global actions, options, shortmapping
14972
14973         longopt_aliases = {"--cols":"--columns", "--skip-first":"--skipfirst"}
14974         argument_options = {
14975                 "--config-root": {
14976                         "help":"specify the location for portage configuration files",
14977                         "action":"store"
14978                 },
14979                 "--color": {
14980                         "help":"enable or disable color output",
14981                         "type":"choice",
14982                         "choices":("y", "n")
14983                 },
14984
14985                 "--jobs": {
14986
14987                         "help"   : "Specifies the number of packages to build " + \
14988                                 "simultaneously.",
14989
14990                         "action" : "store"
14991                 },
14992
14993                 "--load-average": {
14994
14995                         "help"   :"Specifies that no new builds should be started " + \
14996                                 "if there are other builds running and the load average " + \
14997                                 "is at least LOAD (a floating-point number).",
14998
14999                         "action" : "store"
15000                 },
15001
15002                 "--with-bdeps": {
15003                         "help":"include unnecessary build time dependencies",
15004                         "type":"choice",
15005                         "choices":("y", "n")
15006                 },
15007                 "--reinstall": {
15008                         "help":"specify conditions to trigger package reinstallation",
15009                         "type":"choice",
15010                         "choices":["changed-use"]
15011                 },
15012                 "--root": {
15013                  "help"   : "specify the target root filesystem for merging packages",
15014                  "action" : "store"
15015                 },
15016
15017                 "--root-deps": {
15018                         "help"    : "modify interpretation of depedencies",
15019                         "type"    : "choice",
15020                         "choices" :("True", "rdeps")
15021                 },
15022         }
15023
15024         from optparse import OptionParser
15025         parser = OptionParser()
15026         if parser.has_option("--help"):
15027                 parser.remove_option("--help")
15028
15029         for action_opt in actions:
15030                 parser.add_option("--" + action_opt, action="store_true",
15031                         dest=action_opt.replace("-", "_"), default=False)
15032         for myopt in options:
15033                 parser.add_option(myopt, action="store_true",
15034                         dest=myopt.lstrip("--").replace("-", "_"), default=False)
15035         for shortopt, longopt in shortmapping.iteritems():
15036                 parser.add_option("-" + shortopt, action="store_true",
15037                         dest=longopt.lstrip("--").replace("-", "_"), default=False)
15038         for myalias, myopt in longopt_aliases.iteritems():
15039                 parser.add_option(myalias, action="store_true",
15040                         dest=myopt.lstrip("--").replace("-", "_"), default=False)
15041
15042         for myopt, kwargs in argument_options.iteritems():
15043                 parser.add_option(myopt,
15044                         dest=myopt.lstrip("--").replace("-", "_"), **kwargs)
15045
15046         tmpcmdline = insert_optional_args(tmpcmdline)
15047
15048         myoptions, myargs = parser.parse_args(args=tmpcmdline)
15049
15050         if myoptions.root_deps == "True":
15051                 myoptions.root_deps = True
15052
15053         if myoptions.jobs:
15054                 jobs = None
15055                 if myoptions.jobs == "True":
15056                         jobs = True
15057                 else:
15058                         try:
15059                                 jobs = int(myoptions.jobs)
15060                         except ValueError:
15061                                 jobs = -1
15062
15063                 if jobs is not True and \
15064                         jobs < 1:
15065                         jobs = None
15066                         if not silent:
15067                                 writemsg("!!! Invalid --jobs parameter: '%s'\n" % \
15068                                         (myoptions.jobs,), noiselevel=-1)
15069
15070                 myoptions.jobs = jobs
15071
15072         if myoptions.load_average:
15073                 try:
15074                         load_average = float(myoptions.load_average)
15075                 except ValueError:
15076                         load_average = 0.0
15077
15078                 if load_average <= 0.0:
15079                         load_average = None
15080                         if not silent:
15081                                 writemsg("!!! Invalid --load-average parameter: '%s'\n" % \
15082                                         (myoptions.load_average,), noiselevel=-1)
15083
15084                 myoptions.load_average = load_average
15085
15086         for myopt in options:
15087                 v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"))
15088                 if v:
15089                         myopts[myopt] = True
15090
15091         for myopt in argument_options:
15092                 v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"), None)
15093                 if v is not None:
15094                         myopts[myopt] = v
15095
15096         if myoptions.searchdesc:
15097                 myoptions.search = True
15098
15099         for action_opt in actions:
15100                 v = getattr(myoptions, action_opt.replace("-", "_"))
15101                 if v:
15102                         if myaction:
15103                                 multiple_actions(myaction, action_opt)
15104                                 sys.exit(1)
15105                         myaction = action_opt
15106
15107         myfiles += myargs
15108
15109         return myaction, myopts, myfiles
15110
15111 def validate_ebuild_environment(trees):
15112         for myroot in trees:
15113                 settings = trees[myroot]["vartree"].settings
15114                 settings.validate()
15115
15116 def clear_caches(trees):
15117         for d in trees.itervalues():
15118                 d["porttree"].dbapi.melt()
15119                 d["porttree"].dbapi._aux_cache.clear()
15120                 d["bintree"].dbapi._aux_cache.clear()
15121                 d["bintree"].dbapi._clear_cache()
15122                 d["vartree"].dbapi.linkmap._clear_cache()
15123         portage.dircache.clear()
15124         gc.collect()
15125
15126 def load_emerge_config(trees=None):
15127         kwargs = {}
15128         for k, envvar in (("config_root", "PORTAGE_CONFIGROOT"), ("target_root", "ROOT")):
15129                 v = os.environ.get(envvar, None)
15130                 if v and v.strip():
15131                         kwargs[k] = v
15132         trees = portage.create_trees(trees=trees, **kwargs)
15133
15134         for root, root_trees in trees.iteritems():
15135                 settings = root_trees["vartree"].settings
15136                 setconfig = load_default_config(settings, root_trees)
15137                 root_trees["root_config"] = RootConfig(settings, root_trees, setconfig)
15138
15139         settings = trees["/"]["vartree"].settings
15140
15141         for myroot in trees:
15142                 if myroot != "/":
15143                         settings = trees[myroot]["vartree"].settings
15144                         break
15145
15146         mtimedbfile = os.path.join("/", portage.CACHE_PATH.lstrip(os.path.sep), "mtimedb")
15147         mtimedb = portage.MtimeDB(mtimedbfile)
15148         
15149         return settings, trees, mtimedb
15150
15151 def adjust_config(myopts, settings):
15152         """Make emerge specific adjustments to the config."""
15153
15154         # To enhance usability, make some vars case insensitive by forcing them to
15155         # lower case.
15156         for myvar in ("AUTOCLEAN", "NOCOLOR"):
15157                 if myvar in settings:
15158                         settings[myvar] = settings[myvar].lower()
15159                         settings.backup_changes(myvar)
15160         del myvar
15161
15162         # Kill noauto as it will break merges otherwise.
15163         if "noauto" in settings.features:
15164                 settings.features.remove('noauto')
15165                 settings['FEATURES'] = ' '.join(sorted(settings.features))
15166                 settings.backup_changes("FEATURES")
15167
15168         CLEAN_DELAY = 5
15169         try:
15170                 CLEAN_DELAY = int(settings.get("CLEAN_DELAY", str(CLEAN_DELAY)))
15171         except ValueError, e:
15172                 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
15173                 portage.writemsg("!!! Unable to parse integer: CLEAN_DELAY='%s'\n" % \
15174                         settings["CLEAN_DELAY"], noiselevel=-1)
15175         settings["CLEAN_DELAY"] = str(CLEAN_DELAY)
15176         settings.backup_changes("CLEAN_DELAY")
15177
15178         EMERGE_WARNING_DELAY = 10
15179         try:
15180                 EMERGE_WARNING_DELAY = int(settings.get(
15181                         "EMERGE_WARNING_DELAY", str(EMERGE_WARNING_DELAY)))
15182         except ValueError, e:
15183                 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
15184                 portage.writemsg("!!! Unable to parse integer: EMERGE_WARNING_DELAY='%s'\n" % \
15185                         settings["EMERGE_WARNING_DELAY"], noiselevel=-1)
15186         settings["EMERGE_WARNING_DELAY"] = str(EMERGE_WARNING_DELAY)
15187         settings.backup_changes("EMERGE_WARNING_DELAY")
15188
15189         if "--quiet" in myopts:
15190                 settings["PORTAGE_QUIET"]="1"
15191                 settings.backup_changes("PORTAGE_QUIET")
15192
15193         if "--verbose" in myopts:
15194                 settings["PORTAGE_VERBOSE"] = "1"
15195                 settings.backup_changes("PORTAGE_VERBOSE")
15196
15197         # Set so that configs will be merged regardless of remembered status
15198         if ("--noconfmem" in myopts):
15199                 settings["NOCONFMEM"]="1"
15200                 settings.backup_changes("NOCONFMEM")
15201
15202         # Set various debug markers... They should be merged somehow.
15203         PORTAGE_DEBUG = 0
15204         try:
15205                 PORTAGE_DEBUG = int(settings.get("PORTAGE_DEBUG", str(PORTAGE_DEBUG)))
15206                 if PORTAGE_DEBUG not in (0, 1):
15207                         portage.writemsg("!!! Invalid value: PORTAGE_DEBUG='%i'\n" % \
15208                                 PORTAGE_DEBUG, noiselevel=-1)
15209                         portage.writemsg("!!! PORTAGE_DEBUG must be either 0 or 1\n",
15210                                 noiselevel=-1)
15211                         PORTAGE_DEBUG = 0
15212         except ValueError, e:
15213                 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
15214                 portage.writemsg("!!! Unable to parse integer: PORTAGE_DEBUG='%s'\n" %\
15215                         settings["PORTAGE_DEBUG"], noiselevel=-1)
15216                 del e
15217         if "--debug" in myopts:
15218                 PORTAGE_DEBUG = 1
15219         settings["PORTAGE_DEBUG"] = str(PORTAGE_DEBUG)
15220         settings.backup_changes("PORTAGE_DEBUG")
15221
15222         if settings.get("NOCOLOR") not in ("yes","true"):
15223                 portage.output.havecolor = 1
15224
15225         """The explicit --color < y | n > option overrides the NOCOLOR environment
15226         variable and stdout auto-detection."""
15227         if "--color" in myopts:
15228                 if "y" == myopts["--color"]:
15229                         portage.output.havecolor = 1
15230                         settings["NOCOLOR"] = "false"
15231                 else:
15232                         portage.output.havecolor = 0
15233                         settings["NOCOLOR"] = "true"
15234                 settings.backup_changes("NOCOLOR")
15235         elif not sys.stdout.isatty() and settings.get("NOCOLOR") != "no":
15236                 portage.output.havecolor = 0
15237                 settings["NOCOLOR"] = "true"
15238                 settings.backup_changes("NOCOLOR")
15239
15240 def apply_priorities(settings):
15241         ionice(settings)
15242         nice(settings)
15243
15244 def nice(settings):
15245         try:
15246                 os.nice(int(settings.get("PORTAGE_NICENESS", "0")))
15247         except (OSError, ValueError), e:
15248                 out = portage.output.EOutput()
15249                 out.eerror("Failed to change nice value to '%s'" % \
15250                         settings["PORTAGE_NICENESS"])
15251                 out.eerror("%s\n" % str(e))
15252
15253 def ionice(settings):
15254
15255         ionice_cmd = settings.get("PORTAGE_IONICE_COMMAND")
15256         if ionice_cmd:
15257                 ionice_cmd = shlex.split(ionice_cmd)
15258         if not ionice_cmd:
15259                 return
15260
15261         from portage.util import varexpand
15262         variables = {"PID" : str(os.getpid())}
15263         cmd = [varexpand(x, mydict=variables) for x in ionice_cmd]
15264
15265         try:
15266                 rval = portage.process.spawn(cmd, env=os.environ)
15267         except portage.exception.CommandNotFound:
15268                 # The OS kernel probably doesn't support ionice,
15269                 # so return silently.
15270                 return
15271
15272         if rval != os.EX_OK:
15273                 out = portage.output.EOutput()
15274                 out.eerror("PORTAGE_IONICE_COMMAND returned %d" % (rval,))
15275                 out.eerror("See the make.conf(5) man page for PORTAGE_IONICE_COMMAND usage instructions.")
15276
15277 def display_missing_pkg_set(root_config, set_name):
15278
15279         msg = []
15280         msg.append(("emerge: There are no sets to satisfy '%s'. " + \
15281                 "The following sets exist:") % \
15282                 colorize("INFORM", set_name))
15283         msg.append("")
15284
15285         for s in sorted(root_config.sets):
15286                 msg.append("    %s" % s)
15287         msg.append("")
15288
15289         writemsg_level("".join("%s\n" % l for l in msg),
15290                 level=logging.ERROR, noiselevel=-1)
15291
15292 def expand_set_arguments(myfiles, myaction, root_config):
15293         retval = os.EX_OK
15294         setconfig = root_config.setconfig
15295
15296         sets = setconfig.getSets()
15297
15298         # In order to know exactly which atoms/sets should be added to the
15299         # world file, the depgraph performs set expansion later. It will get
15300         # confused about where the atoms came from if it's not allowed to
15301         # expand them itself.
15302         do_not_expand = (None, )
15303         newargs = []
15304         for a in myfiles:
15305                 if a in ("system", "world"):
15306                         newargs.append(SETPREFIX+a)
15307                 else:
15308                         newargs.append(a)
15309         myfiles = newargs
15310         del newargs
15311         newargs = []
15312
15313         # separators for set arguments
15314         ARG_START = "{"
15315         ARG_END = "}"
15316
15317         # WARNING: all operators must be of equal length
15318         IS_OPERATOR = "/@"
15319         DIFF_OPERATOR = "-@"
15320         UNION_OPERATOR = "+@"
15321         
15322         for i in range(0, len(myfiles)):
15323                 if myfiles[i].startswith(SETPREFIX):
15324                         start = 0
15325                         end = 0
15326                         x = myfiles[i][len(SETPREFIX):]
15327                         newset = ""
15328                         while x:
15329                                 start = x.find(ARG_START)
15330                                 end = x.find(ARG_END)
15331                                 if start > 0 and start < end:
15332                                         namepart = x[:start]
15333                                         argpart = x[start+1:end]
15334                                 
15335                                         # TODO: implement proper quoting
15336                                         args = argpart.split(",")
15337                                         options = {}
15338                                         for a in args:
15339                                                 if "=" in a:
15340                                                         k, v  = a.split("=", 1)
15341                                                         options[k] = v
15342                                                 else:
15343                                                         options[a] = "True"
15344                                         setconfig.update(namepart, options)
15345                                         newset += (x[:start-len(namepart)]+namepart)
15346                                         x = x[end+len(ARG_END):]
15347                                 else:
15348                                         newset += x
15349                                         x = ""
15350                         myfiles[i] = SETPREFIX+newset
15351                                 
15352         sets = setconfig.getSets()
15353
15354         # display errors that occured while loading the SetConfig instance
15355         for e in setconfig.errors:
15356                 print colorize("BAD", "Error during set creation: %s" % e)
15357         
15358         # emerge relies on the existance of sets with names "world" and "system"
15359         required_sets = ("world", "system")
15360         missing_sets = []
15361
15362         for s in required_sets:
15363                 if s not in sets:
15364                         missing_sets.append(s)
15365         if missing_sets:
15366                 if len(missing_sets) > 2:
15367                         missing_sets_str = ", ".join('"%s"' % s for s in missing_sets[:-1])
15368                         missing_sets_str += ', and "%s"' % missing_sets[-1]
15369                 elif len(missing_sets) == 2:
15370                         missing_sets_str = '"%s" and "%s"' % tuple(missing_sets)
15371                 else:
15372                         missing_sets_str = '"%s"' % missing_sets[-1]
15373                 msg = ["emerge: incomplete set configuration, " + \
15374                         "missing set(s): %s" % missing_sets_str]
15375                 if sets:
15376                         msg.append("        sets defined: %s" % ", ".join(sets))
15377                 msg.append("        This usually means that '%s'" % \
15378                         (os.path.join(portage.const.GLOBAL_CONFIG_PATH, "sets.conf"),))
15379                 msg.append("        is missing or corrupt.")
15380                 for line in msg:
15381                         writemsg_level(line + "\n", level=logging.ERROR, noiselevel=-1)
15382                 return (None, 1)
15383         unmerge_actions = ("unmerge", "prune", "clean", "depclean")
15384
15385         for a in myfiles:
15386                 if a.startswith(SETPREFIX):
15387                         # support simple set operations (intersection, difference and union)
15388                         # on the commandline. Expressions are evaluated strictly left-to-right
15389                         if IS_OPERATOR in a or DIFF_OPERATOR in a or UNION_OPERATOR in a:
15390                                 expression = a[len(SETPREFIX):]
15391                                 expr_sets = []
15392                                 expr_ops = []
15393                                 while IS_OPERATOR in expression or DIFF_OPERATOR in expression or UNION_OPERATOR in expression:
15394                                         is_pos = expression.rfind(IS_OPERATOR)
15395                                         diff_pos = expression.rfind(DIFF_OPERATOR)
15396                                         union_pos = expression.rfind(UNION_OPERATOR)
15397                                         op_pos = max(is_pos, diff_pos, union_pos)
15398                                         s1 = expression[:op_pos]
15399                                         s2 = expression[op_pos+len(IS_OPERATOR):]
15400                                         op = expression[op_pos:op_pos+len(IS_OPERATOR)]
15401                                         if not s2 in sets:
15402                                                 display_missing_pkg_set(root_config, s2)
15403                                                 return (None, 1)
15404                                         expr_sets.insert(0, s2)
15405                                         expr_ops.insert(0, op)
15406                                         expression = s1
15407                                 if not expression in sets:
15408                                         display_missing_pkg_set(root_config, expression)
15409                                         return (None, 1)
15410                                 expr_sets.insert(0, expression)
15411                                 result = set(setconfig.getSetAtoms(expression))
15412                                 for i in range(0, len(expr_ops)):
15413                                         s2 = setconfig.getSetAtoms(expr_sets[i+1])
15414                                         if expr_ops[i] == IS_OPERATOR:
15415                                                 result.intersection_update(s2)
15416                                         elif expr_ops[i] == DIFF_OPERATOR:
15417                                                 result.difference_update(s2)
15418                                         elif expr_ops[i] == UNION_OPERATOR:
15419                                                 result.update(s2)
15420                                         else:
15421                                                 raise NotImplementedError("unknown set operator %s" % expr_ops[i])
15422                                 newargs.extend(result)
15423                         else:                   
15424                                 s = a[len(SETPREFIX):]
15425                                 if s not in sets:
15426                                         display_missing_pkg_set(root_config, s)
15427                                         return (None, 1)
15428                                 setconfig.active.append(s)
15429                                 try:
15430                                         set_atoms = setconfig.getSetAtoms(s)
15431                                 except portage.exception.PackageSetNotFound, e:
15432                                         writemsg_level(("emerge: the given set '%s' " + \
15433                                                 "contains a non-existent set named '%s'.\n") % \
15434                                                 (s, e), level=logging.ERROR, noiselevel=-1)
15435                                         return (None, 1)
15436                                 if myaction in unmerge_actions and \
15437                                                 not sets[s].supportsOperation("unmerge"):
15438                                         sys.stderr.write("emerge: the given set '%s' does " % s + \
15439                                                 "not support unmerge operations\n")
15440                                         retval = 1
15441                                 elif not set_atoms:
15442                                         print "emerge: '%s' is an empty set" % s
15443                                 elif myaction not in do_not_expand:
15444                                         newargs.extend(set_atoms)
15445                                 else:
15446                                         newargs.append(SETPREFIX+s)
15447                                 for e in sets[s].errors:
15448                                         print e
15449                 else:
15450                         newargs.append(a)
15451         return (newargs, retval)
15452
15453 def repo_name_check(trees):
15454         missing_repo_names = set()
15455         for root, root_trees in trees.iteritems():
15456                 if "porttree" in root_trees:
15457                         portdb = root_trees["porttree"].dbapi
15458                         missing_repo_names.update(portdb.porttrees)
15459                         repos = portdb.getRepositories()
15460                         for r in repos:
15461                                 missing_repo_names.discard(portdb.getRepositoryPath(r))
15462                         if portdb.porttree_root in missing_repo_names and \
15463                                 not os.path.exists(os.path.join(
15464                                 portdb.porttree_root, "profiles")):
15465                                 # This is normal if $PORTDIR happens to be empty,
15466                                 # so don't warn about it.
15467                                 missing_repo_names.remove(portdb.porttree_root)
15468
15469         if missing_repo_names:
15470                 msg = []
15471                 msg.append("WARNING: One or more repositories " + \
15472                         "have missing repo_name entries:")
15473                 msg.append("")
15474                 for p in missing_repo_names:
15475                         msg.append("\t%s/profiles/repo_name" % (p,))
15476                 msg.append("")
15477                 msg.extend(textwrap.wrap("NOTE: Each repo_name entry " + \
15478                         "should be a plain text file containing a unique " + \
15479                         "name for the repository on the first line.", 70))
15480                 writemsg_level("".join("%s\n" % l for l in msg),
15481                         level=logging.WARNING, noiselevel=-1)
15482
15483         return bool(missing_repo_names)
15484
15485 def config_protect_check(trees):
15486         for root, root_trees in trees.iteritems():
15487                 if not root_trees["root_config"].settings.get("CONFIG_PROTECT"):
15488                         msg = "!!! CONFIG_PROTECT is empty"
15489                         if root != "/":
15490                                 msg += " for '%s'" % root
15491                         writemsg_level(msg, level=logging.WARN, noiselevel=-1)
15492
15493 def ambiguous_package_name(arg, atoms, root_config, spinner, myopts):
15494
15495         if "--quiet" in myopts:
15496                 print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
15497                 print "!!! one of the following fully-qualified ebuild names instead:\n"
15498                 for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
15499                         print "    " + colorize("INFORM", cp)
15500                 return
15501
15502         s = search(root_config, spinner, "--searchdesc" in myopts,
15503                 "--quiet" not in myopts, "--usepkg" in myopts,
15504                 "--usepkgonly" in myopts)
15505         null_cp = portage.dep_getkey(insert_category_into_atom(
15506                 arg, "null"))
15507         cat, atom_pn = portage.catsplit(null_cp)
15508         s.searchkey = atom_pn
15509         for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
15510                 s.addCP(cp)
15511         s.output()
15512         print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
15513         print "!!! one of the above fully-qualified ebuild names instead.\n"
15514
15515 def profile_check(trees, myaction, myopts):
15516         if myaction in ("info", "sync"):
15517                 return os.EX_OK
15518         elif "--version" in myopts or "--help" in myopts:
15519                 return os.EX_OK
15520         for root, root_trees in trees.iteritems():
15521                 if root_trees["root_config"].settings.profiles:
15522                         continue
15523                 # generate some profile related warning messages
15524                 validate_ebuild_environment(trees)
15525                 msg = "If you have just changed your profile configuration, you " + \
15526                         "should revert back to the previous configuration. Due to " + \
15527                         "your current profile being invalid, allowed actions are " + \
15528                         "limited to --help, --info, --sync, and --version."
15529                 writemsg_level("".join("!!! %s\n" % l for l in textwrap.wrap(msg, 70)),
15530                         level=logging.ERROR, noiselevel=-1)
15531                 return 1
15532         return os.EX_OK
15533
15534 def emerge_main():
15535         global portage  # NFC why this is necessary now - genone
15536         portage._disable_legacy_globals()
15537         # Disable color until we're sure that it should be enabled (after
15538         # EMERGE_DEFAULT_OPTS has been parsed).
15539         portage.output.havecolor = 0
15540         # This first pass is just for options that need to be known as early as
15541         # possible, such as --config-root.  They will be parsed again later,
15542         # together with EMERGE_DEFAULT_OPTS (which may vary depending on the
15543         # the value of --config-root).
15544         myaction, myopts, myfiles = parse_opts(sys.argv[1:], silent=True)
15545         if "--debug" in myopts:
15546                 os.environ["PORTAGE_DEBUG"] = "1"
15547         if "--config-root" in myopts:
15548                 os.environ["PORTAGE_CONFIGROOT"] = myopts["--config-root"]
15549         if "--root" in myopts:
15550                 os.environ["ROOT"] = myopts["--root"]
15551
15552         # Portage needs to ensure a sane umask for the files it creates.
15553         os.umask(022)
15554         settings, trees, mtimedb = load_emerge_config()
15555         portdb = trees[settings["ROOT"]]["porttree"].dbapi
15556         rval = profile_check(trees, myaction, myopts)
15557         if rval != os.EX_OK:
15558                 return rval
15559
15560         if portage._global_updates(trees, mtimedb["updates"]):
15561                 mtimedb.commit()
15562                 # Reload the whole config from scratch.
15563                 settings, trees, mtimedb = load_emerge_config(trees=trees)
15564                 portdb = trees[settings["ROOT"]]["porttree"].dbapi
15565
15566         xterm_titles = "notitles" not in settings.features
15567
15568         tmpcmdline = []
15569         if "--ignore-default-opts" not in myopts:
15570                 tmpcmdline.extend(settings["EMERGE_DEFAULT_OPTS"].split())
15571         tmpcmdline.extend(sys.argv[1:])
15572         myaction, myopts, myfiles = parse_opts(tmpcmdline)
15573
15574         if "--digest" in myopts:
15575                 os.environ["FEATURES"] = os.environ.get("FEATURES","") + " digest"
15576                 # Reload the whole config from scratch so that the portdbapi internal
15577                 # config is updated with new FEATURES.
15578                 settings, trees, mtimedb = load_emerge_config(trees=trees)
15579                 portdb = trees[settings["ROOT"]]["porttree"].dbapi
15580
15581         for myroot in trees:
15582                 mysettings =  trees[myroot]["vartree"].settings
15583                 mysettings.unlock()
15584                 adjust_config(myopts, mysettings)
15585                 if '--pretend' not in myopts and myaction in \
15586                         (None, 'clean', 'depclean', 'prune', 'unmerge'):
15587                         mysettings["PORTAGE_COUNTER_HASH"] = \
15588                                 trees[myroot]["vartree"].dbapi._counter_hash()
15589                         mysettings.backup_changes("PORTAGE_COUNTER_HASH")
15590                 mysettings.lock()
15591                 del myroot, mysettings
15592
15593         apply_priorities(settings)
15594
15595         spinner = stdout_spinner()
15596         if "candy" in settings.features:
15597                 spinner.update = spinner.update_scroll
15598
15599         if "--quiet" not in myopts:
15600                 portage.deprecated_profile_check(settings=settings)
15601                 repo_name_check(trees)
15602                 config_protect_check(trees)
15603
15604         for mytrees in trees.itervalues():
15605                 mydb = mytrees["porttree"].dbapi
15606                 # Freeze the portdbapi for performance (memoize all xmatch results).
15607                 mydb.freeze()
15608         del mytrees, mydb
15609
15610         if "moo" in myfiles:
15611                 print """
15612
15613   Larry loves Gentoo (""" + platform.system() + """)
15614
15615  _______________________
15616 < Have you mooed today? >
15617  -----------------------
15618         \   ^__^
15619          \  (oo)\_______
15620             (__)\       )\/\ 
15621                 ||----w |
15622                 ||     ||
15623
15624 """
15625
15626         for x in myfiles:
15627                 ext = os.path.splitext(x)[1]
15628                 if (ext == ".ebuild" or ext == ".tbz2") and os.path.exists(os.path.abspath(x)):
15629                         print colorize("BAD", "\n*** emerging by path is broken and may not always work!!!\n")
15630                         break
15631
15632         root_config = trees[settings["ROOT"]]["root_config"]
15633         if myaction == "list-sets":
15634                 sys.stdout.write("".join("%s\n" % s for s in sorted(root_config.sets)))
15635                 sys.stdout.flush()
15636                 return os.EX_OK
15637
15638         # only expand sets for actions taking package arguments
15639         oldargs = myfiles[:]
15640         if myaction in ("clean", "config", "depclean", "info", "prune", "unmerge", None):
15641                 myfiles, retval = expand_set_arguments(myfiles, myaction, root_config)
15642                 if retval != os.EX_OK:
15643                         return retval
15644
15645                 # Need to handle empty sets specially, otherwise emerge will react 
15646                 # with the help message for empty argument lists
15647                 if oldargs and not myfiles:
15648                         print "emerge: no targets left after set expansion"
15649                         return 0
15650
15651         if ("--tree" in myopts) and ("--columns" in myopts):
15652                 print "emerge: can't specify both of \"--tree\" and \"--columns\"."
15653                 return 1
15654
15655         if ("--quiet" in myopts):
15656                 spinner.update = spinner.update_quiet
15657                 portage.util.noiselimit = -1
15658
15659         # Always create packages if FEATURES=buildpkg
15660         # Imply --buildpkg if --buildpkgonly
15661         if ("buildpkg" in settings.features) or ("--buildpkgonly" in myopts):
15662                 if "--buildpkg" not in myopts:
15663                         myopts["--buildpkg"] = True
15664
15665         # Always try and fetch binary packages if FEATURES=getbinpkg
15666         if ("getbinpkg" in settings.features):
15667                 myopts["--getbinpkg"] = True
15668
15669         if "--buildpkgonly" in myopts:
15670                 # --buildpkgonly will not merge anything, so
15671                 # it cancels all binary package options.
15672                 for opt in ("--getbinpkg", "--getbinpkgonly",
15673                         "--usepkg", "--usepkgonly"):
15674                         myopts.pop(opt, None)
15675
15676         if "--fetch-all-uri" in myopts:
15677                 myopts["--fetchonly"] = True
15678
15679         if "--skipfirst" in myopts and "--resume" not in myopts:
15680                 myopts["--resume"] = True
15681
15682         if ("--getbinpkgonly" in myopts) and not ("--usepkgonly" in myopts):
15683                 myopts["--usepkgonly"] = True
15684
15685         if ("--getbinpkgonly" in myopts) and not ("--getbinpkg" in myopts):
15686                 myopts["--getbinpkg"] = True
15687
15688         if ("--getbinpkg" in myopts) and not ("--usepkg" in myopts):
15689                 myopts["--usepkg"] = True
15690
15691         # Also allow -K to apply --usepkg/-k
15692         if ("--usepkgonly" in myopts) and not ("--usepkg" in myopts):
15693                 myopts["--usepkg"] = True
15694
15695         # Allow -p to remove --ask
15696         if ("--pretend" in myopts) and ("--ask" in myopts):
15697                 print ">>> --pretend disables --ask... removing --ask from options."
15698                 del myopts["--ask"]
15699
15700         # forbid --ask when not in a terminal
15701         # note: this breaks `emerge --ask | tee logfile`, but that doesn't work anyway.
15702         if ("--ask" in myopts) and (not sys.stdin.isatty()):
15703                 portage.writemsg("!!! \"--ask\" should only be used in a terminal. Exiting.\n",
15704                         noiselevel=-1)
15705                 return 1
15706
15707         if settings.get("PORTAGE_DEBUG", "") == "1":
15708                 spinner.update = spinner.update_quiet
15709                 portage.debug=1
15710                 if "python-trace" in settings.features:
15711                         import portage.debug
15712                         portage.debug.set_trace(True)
15713
15714         if not ("--quiet" in myopts):
15715                 if not sys.stdout.isatty() or ("--nospinner" in myopts):
15716                         spinner.update = spinner.update_basic
15717
15718         if myaction == 'version':
15719                 print getportageversion(settings["PORTDIR"], settings["ROOT"],
15720                         settings.profile_path, settings["CHOST"],
15721                         trees[settings["ROOT"]]["vartree"].dbapi)
15722                 return 0
15723         elif "--help" in myopts:
15724                 _emerge.help.help(myaction, myopts, portage.output.havecolor)
15725                 return 0
15726
15727         if "--debug" in myopts:
15728                 print "myaction", myaction
15729                 print "myopts", myopts
15730
15731         if not myaction and not myfiles and "--resume" not in myopts:
15732                 _emerge.help.help(myaction, myopts, portage.output.havecolor)
15733                 return 1
15734
15735         pretend = "--pretend" in myopts
15736         fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
15737         buildpkgonly = "--buildpkgonly" in myopts
15738
15739         # check if root user is the current user for the actions where emerge needs this
15740         if portage.secpass < 2:
15741                 # We've already allowed "--version" and "--help" above.
15742                 if "--pretend" not in myopts and myaction not in ("search","info"):
15743                         need_superuser = not \
15744                                 (fetchonly or \
15745                                 (buildpkgonly and secpass >= 1) or \
15746                                 myaction in ("metadata", "regen") or \
15747                                 (myaction == "sync" and os.access(settings["PORTDIR"], os.W_OK)))
15748                         if portage.secpass < 1 or \
15749                                 need_superuser:
15750                                 if need_superuser:
15751                                         access_desc = "superuser"
15752                                 else:
15753                                         access_desc = "portage group"
15754                                 # Always show portage_group_warning() when only portage group
15755                                 # access is required but the user is not in the portage group.
15756                                 from portage.data import portage_group_warning
15757                                 if "--ask" in myopts:
15758                                         myopts["--pretend"] = True
15759                                         del myopts["--ask"]
15760                                         print ("%s access is required... " + \
15761                                                 "adding --pretend to options.\n") % access_desc
15762                                         if portage.secpass < 1 and not need_superuser:
15763                                                 portage_group_warning()
15764                                 else:
15765                                         sys.stderr.write(("emerge: %s access is " + \
15766                                                 "required.\n\n") % access_desc)
15767                                         if portage.secpass < 1 and not need_superuser:
15768                                                 portage_group_warning()
15769                                         return 1
15770
15771         disable_emergelog = False
15772         for x in ("--pretend", "--fetchonly", "--fetch-all-uri"):
15773                 if x in myopts:
15774                         disable_emergelog = True
15775                         break
15776         if myaction in ("search", "info"):
15777                 disable_emergelog = True
15778         if disable_emergelog:
15779                 """ Disable emergelog for everything except build or unmerge
15780                 operations.  This helps minimize parallel emerge.log entries that can
15781                 confuse log parsers.  We especially want it disabled during
15782                 parallel-fetch, which uses --resume --fetchonly."""
15783                 global emergelog
15784                 def emergelog(*pargs, **kargs):
15785                         pass
15786
15787         if not "--pretend" in myopts:
15788                 emergelog(xterm_titles, "Started emerge on: "+\
15789                         time.strftime("%b %d, %Y %H:%M:%S", time.localtime()))
15790                 myelogstr=""
15791                 if myopts:
15792                         myelogstr=" ".join(myopts)
15793                 if myaction:
15794                         myelogstr+=" "+myaction
15795                 if myfiles:
15796                         myelogstr += " " + " ".join(oldargs)
15797                 emergelog(xterm_titles, " *** emerge " + myelogstr)
15798         del oldargs
15799
15800         def emergeexitsig(signum, frame):
15801                 signal.signal(signal.SIGINT, signal.SIG_IGN)
15802                 signal.signal(signal.SIGTERM, signal.SIG_IGN)
15803                 portage.util.writemsg("\n\nExiting on signal %(signal)s\n" % {"signal":signum})
15804                 sys.exit(100+signum)
15805         signal.signal(signal.SIGINT, emergeexitsig)
15806         signal.signal(signal.SIGTERM, emergeexitsig)
15807
15808         def emergeexit():
15809                 """This gets out final log message in before we quit."""
15810                 if "--pretend" not in myopts:
15811                         emergelog(xterm_titles, " *** terminating.")
15812                 if "notitles" not in settings.features:
15813                         xtermTitleReset()
15814         portage.atexit_register(emergeexit)
15815
15816         if myaction in ("config", "metadata", "regen", "sync"):
15817                 if "--pretend" in myopts:
15818                         sys.stderr.write(("emerge: The '%s' action does " + \
15819                                 "not support '--pretend'.\n") % myaction)
15820                         return 1
15821
15822         if "sync" == myaction:
15823                 return action_sync(settings, trees, mtimedb, myopts, myaction)
15824         elif "metadata" == myaction:
15825                 action_metadata(settings, portdb, myopts)
15826         elif myaction=="regen":
15827                 validate_ebuild_environment(trees)
15828                 return action_regen(settings, portdb, myopts.get("--jobs"),
15829                         myopts.get("--load-average"))
15830         # HELP action
15831         elif "config"==myaction:
15832                 validate_ebuild_environment(trees)
15833                 action_config(settings, trees, myopts, myfiles)
15834
15835         # SEARCH action
15836         elif "search"==myaction:
15837                 validate_ebuild_environment(trees)
15838                 action_search(trees[settings["ROOT"]]["root_config"],
15839                         myopts, myfiles, spinner)
15840         elif myaction in ("clean", "unmerge") or \
15841                 (myaction == "prune" and "--nodeps" in myopts):
15842                 validate_ebuild_environment(trees)
15843
15844                 # Ensure atoms are valid before calling unmerge().
15845                 # For backward compat, leading '=' is not required.
15846                 for x in myfiles:
15847                         if is_valid_package_atom(x) or \
15848                                 is_valid_package_atom("=" + x):
15849                                 continue
15850                         msg = []
15851                         msg.append("'%s' is not a valid package atom." % (x,))
15852                         msg.append("Please check ebuild(5) for full details.")
15853                         writemsg_level("".join("!!! %s\n" % line for line in msg),
15854                                 level=logging.ERROR, noiselevel=-1)
15855                         return 1
15856
15857                 # When given a list of atoms, unmerge
15858                 # them in the order given.
15859                 ordered = myaction == "unmerge"
15860                 if 1 == unmerge(root_config, myopts, myaction, myfiles,
15861                         mtimedb["ldpath"], ordered=ordered):
15862                         if not (buildpkgonly or fetchonly or pretend):
15863                                 post_emerge(root_config, myopts, mtimedb, os.EX_OK)
15864
15865         elif myaction in ("depclean", "info", "prune"):
15866
15867                 # Ensure atoms are valid before calling unmerge().
15868                 vardb = trees[settings["ROOT"]]["vartree"].dbapi
15869                 valid_atoms = []
15870                 for x in myfiles:
15871                         if is_valid_package_atom(x):
15872                                 try:
15873                                         valid_atoms.append(
15874                                                 portage.dep_expand(x, mydb=vardb, settings=settings))
15875                                 except portage.exception.AmbiguousPackageName, e:
15876                                         msg = "The short ebuild name \"" + x + \
15877                                                 "\" is ambiguous.  Please specify " + \
15878                                                 "one of the following " + \
15879                                                 "fully-qualified ebuild names instead:"
15880                                         for line in textwrap.wrap(msg, 70):
15881                                                 writemsg_level("!!! %s\n" % (line,),
15882                                                         level=logging.ERROR, noiselevel=-1)
15883                                         for i in e[0]:
15884                                                 writemsg_level("    %s\n" % colorize("INFORM", i),
15885                                                         level=logging.ERROR, noiselevel=-1)
15886                                         writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
15887                                         return 1
15888                                 continue
15889                         msg = []
15890                         msg.append("'%s' is not a valid package atom." % (x,))
15891                         msg.append("Please check ebuild(5) for full details.")
15892                         writemsg_level("".join("!!! %s\n" % line for line in msg),
15893                                 level=logging.ERROR, noiselevel=-1)
15894                         return 1
15895
15896                 if myaction == "info":
15897                         return action_info(settings, trees, myopts, valid_atoms)
15898
15899                 validate_ebuild_environment(trees)
15900                 action_depclean(settings, trees, mtimedb["ldpath"],
15901                         myopts, myaction, valid_atoms, spinner)
15902                 if not (buildpkgonly or fetchonly or pretend):
15903                         post_emerge(root_config, myopts, mtimedb, os.EX_OK)
15904         # "update", "system", or just process files:
15905         else:
15906                 validate_ebuild_environment(trees)
15907
15908                 for x in myfiles:
15909                         if x.startswith(SETPREFIX) or \
15910                                 is_valid_package_atom(x):
15911                                 continue
15912                         if x[:1] == os.sep:
15913                                 continue
15914                         try:
15915                                 os.lstat(x)
15916                                 continue
15917                         except OSError:
15918                                 pass
15919                         msg = []
15920                         msg.append("'%s' is not a valid package atom." % (x,))
15921                         msg.append("Please check ebuild(5) for full details.")
15922                         writemsg_level("".join("!!! %s\n" % line for line in msg),
15923                                 level=logging.ERROR, noiselevel=-1)
15924                         return 1
15925
15926                 if "--pretend" not in myopts:
15927                         display_news_notification(root_config, myopts)
15928                 retval = action_build(settings, trees, mtimedb,
15929                         myopts, myaction, myfiles, spinner)
15930                 root_config = trees[settings["ROOT"]]["root_config"]
15931                 post_emerge(root_config, myopts, mtimedb, retval)
15932
15933                 return retval