Bug #262211 - Avoid triggering an InvalidAtom exception inside the Package
[portage.git] / pym / _emerge / __init__.py
1 #!/usr/bin/python -O
2 # Copyright 1999-2006 Gentoo Foundation
3 # Distributed under the terms of the GNU General Public License v2
4 # $Id$
5
6 import array
7 from collections import deque
8 import fcntl
9 import formatter
10 import logging
11 import pwd
12 import select
13 import shlex
14 import shutil
15 import signal
16 import sys
17 import textwrap
18 import urlparse
19 import weakref
20 import gc
21 import os, stat
22 import platform
23
24 try:
25         import portage
26 except ImportError:
27         from os import path as osp
28         sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
29         import portage
30
31 from portage import digraph
32 from portage.const import NEWS_LIB_PATH
33
34 import _emerge.help
35 import portage.xpak, commands, errno, re, socket, time
36 from portage.output import blue, bold, colorize, darkblue, darkgreen, darkred, green, \
37         nc_len, red, teal, turquoise, xtermTitle, \
38         xtermTitleReset, yellow
39 from portage.output import create_color_func
40 good = create_color_func("GOOD")
41 bad = create_color_func("BAD")
42 # white looks bad on terminals with white background
43 from portage.output import bold as white
44
45 import portage.elog
46 import portage.dep
47 portage.dep._dep_check_strict = True
48 import portage.util
49 import portage.locks
50 import portage.exception
51 from portage.data import secpass
52 from portage.elog.messages import eerror
53 from portage.util import normalize_path as normpath
54 from portage.util import cmp_sort_key, writemsg, writemsg_level
55 from portage.sets import load_default_config, SETPREFIX
56 from portage.sets.base import InternalPackageSet
57
58 from itertools import chain, izip
59
60 try:
61         import cPickle as pickle
62 except ImportError:
63         import pickle
64
65 try:
66         from cStringIO import StringIO
67 except ImportError:
68         from StringIO import StringIO
69
70 class stdout_spinner(object):
71         scroll_msgs = [
72                 "Gentoo Rocks ("+platform.system()+")",
73                 "Thank you for using Gentoo. :)",
74                 "Are you actually trying to read this?",
75                 "How many times have you stared at this?",
76                 "We are generating the cache right now",
77                 "You are paying too much attention.",
78                 "A theory is better than its explanation.",
79                 "Phasers locked on target, Captain.",
80                 "Thrashing is just virtual crashing.",
81                 "To be is to program.",
82                 "Real Users hate Real Programmers.",
83                 "When all else fails, read the instructions.",
84                 "Functionality breeds Contempt.",
85                 "The future lies ahead.",
86                 "3.1415926535897932384626433832795028841971694",
87                 "Sometimes insanity is the only alternative.",
88                 "Inaccuracy saves a world of explanation.",
89         ]
90
91         twirl_sequence = "/-\\|/-\\|/-\\|/-\\|\\-/|\\-/|\\-/|\\-/|"
92
93         def __init__(self):
94                 self.spinpos = 0
95                 self.update = self.update_twirl
96                 self.scroll_sequence = self.scroll_msgs[
97                         int(time.time() * 100) % len(self.scroll_msgs)]
98                 self.last_update = 0
99                 self.min_display_latency = 0.05
100
101         def _return_early(self):
102                 """
103                 Flushing output to the tty too frequently wastes cpu time. Therefore,
104                 each update* method should return without doing any output when this
105                 method returns True.
106                 """
107                 cur_time = time.time()
108                 if cur_time - self.last_update < self.min_display_latency:
109                         return True
110                 self.last_update = cur_time
111                 return False
112
113         def update_basic(self):
114                 self.spinpos = (self.spinpos + 1) % 500
115                 if self._return_early():
116                         return
117                 if (self.spinpos % 100) == 0:
118                         if self.spinpos == 0:
119                                 sys.stdout.write(". ")
120                         else:
121                                 sys.stdout.write(".")
122                 sys.stdout.flush()
123
124         def update_scroll(self):
125                 if self._return_early():
126                         return
127                 if(self.spinpos >= len(self.scroll_sequence)):
128                         sys.stdout.write(darkgreen(" \b\b\b" + self.scroll_sequence[
129                                 len(self.scroll_sequence) - 1 - (self.spinpos % len(self.scroll_sequence))]))
130                 else:
131                         sys.stdout.write(green("\b " + self.scroll_sequence[self.spinpos]))
132                 sys.stdout.flush()
133                 self.spinpos = (self.spinpos + 1) % (2 * len(self.scroll_sequence))
134
135         def update_twirl(self):
136                 self.spinpos = (self.spinpos + 1) % len(self.twirl_sequence)
137                 if self._return_early():
138                         return
139                 sys.stdout.write("\b\b " + self.twirl_sequence[self.spinpos])
140                 sys.stdout.flush()
141
142         def update_quiet(self):
143                 return
144
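
# A minimal usage sketch (not part of the original file): callers construct a
# stdout_spinner and repeatedly call its update() alias, which defaults to
# update_twirl; --nospinner handling would point it at update_quiet instead.
# The loop below is purely hypothetical.
def _example_spinner_usage():
        spinner = stdout_spinner()
        spinner.update = spinner.update_scroll  # or update_basic / update_twirl / update_quiet
        for _ in range(1000):
                spinner.update()  # output is rate-limited via _return_early()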
145 def userquery(prompt, responses=None, colours=None):
146         """Displays a prompt and a set of responses, then waits for user input.
147         The input is checked against the responses and the first one to match
148         is returned.  An empty response will match the first value in responses.
149         The input buffer is *not* cleared prior to the prompt!
150
151         prompt: a String.
152         responses: a List of Strings.
153         colours: a List of Functions taking and returning a String, used to
154         process the responses for display. Typically these will be functions
155         like red() but could be e.g. lambda x: "DisplayString".
156         If responses is omitted, defaults to ["Yes", "No"], [green, red].
157         If only colours is omitted, defaults to [bold, ...].
158
159         Returns a member of the List responses. (If called without optional
160         arguments, returns "Yes" or "No".)
161         KeyboardInterrupt is converted to SystemExit to avoid tracebacks being
162         printed."""
163         if responses is None:
164                 responses = ["Yes", "No"]
165                 colours = [
166                         create_color_func("PROMPT_CHOICE_DEFAULT"),
167                         create_color_func("PROMPT_CHOICE_OTHER")
168                 ]
169         elif colours is None:
170                 colours=[bold]
171         colours=(colours*len(responses))[:len(responses)]
172         print bold(prompt),
173         try:
174                 while True:
175                         response=raw_input("["+"/".join([colours[i](responses[i]) for i in range(len(responses))])+"] ")
176                         for key in responses:
177                                 # An empty response will match the first value in responses.
178                                 if response.upper()==key[:len(response)].upper():
179                                         return key
180                         print "Sorry, response '%s' not understood." % response,
181         except (EOFError, KeyboardInterrupt):
182                 print "Interrupted."
183                 sys.exit(1)
184
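
# Hedged usage sketch (illustration only): userquery() with explicit response
# strings.  An empty response selects the first entry, so "Yes" acts as the
# default here; the prompt text is hypothetical.
def _example_userquery():
        if userquery("Continue with the merge?", responses=["Yes", "No"]) != "Yes":
                sys.exit(1)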
185 actions = frozenset([
186 "clean", "config", "depclean",
187 "info", "list-sets", "metadata",
188 "prune", "regen",  "search",
189 "sync",  "unmerge", "version",
190 ])
191 options=[
192 "--ask",          "--alphabetical",
193 "--buildpkg",     "--buildpkgonly",
194 "--changelog",    "--columns",
195 "--complete-graph",
196 "--debug",        "--deep",
197 "--digest",
198 "--emptytree",
199 "--fetchonly",    "--fetch-all-uri",
200 "--getbinpkg",    "--getbinpkgonly",
201 "--help",         "--ignore-default-opts",
202 "--keep-going",
203 "--noconfmem",
204 "--newuse",       "--nocolor",
205 "--nodeps",       "--noreplace",
206 "--nospinner",    "--oneshot",
207 "--onlydeps",     "--pretend",
208 "--quiet",        "--resume",
209 "--searchdesc",   "--selective",
210 "--skipfirst",
211 "--tree",
212 "--update",
213 "--usepkg",       "--usepkgonly",
214 "--verbose",
215 ]
216
217 shortmapping={
218 "1":"--oneshot",
219 "a":"--ask",
220 "b":"--buildpkg",  "B":"--buildpkgonly",
221 "c":"--clean",     "C":"--unmerge",
222 "d":"--debug",     "D":"--deep",
223 "e":"--emptytree",
224 "f":"--fetchonly", "F":"--fetch-all-uri",
225 "g":"--getbinpkg", "G":"--getbinpkgonly",
226 "h":"--help",
227 "k":"--usepkg",    "K":"--usepkgonly",
228 "l":"--changelog",
229 "n":"--noreplace", "N":"--newuse",
230 "o":"--onlydeps",  "O":"--nodeps",
231 "p":"--pretend",   "P":"--prune",
232 "q":"--quiet",
233 "s":"--search",    "S":"--searchdesc",
234 "t":"--tree",
235 "u":"--update",
236 "v":"--verbose",   "V":"--version"
237 }
238
239 def emergelog(xterm_titles, mystr, short_msg=None):
240         if xterm_titles and short_msg:
241                 if "HOSTNAME" in os.environ:
242                         short_msg = os.environ["HOSTNAME"]+": "+short_msg
243                 xtermTitle(short_msg)
244         try:
245                 file_path = "/var/log/emerge.log"
246                 mylogfile = open(file_path, "a")
247                 portage.util.apply_secpass_permissions(file_path,
248                         uid=portage.portage_uid, gid=portage.portage_gid,
249                         mode=0660)
250                 mylock = None
251                 try:
252                         mylock = portage.locks.lockfile(mylogfile)
253                         # seek because we may have gotten held up by the lock.
254                         # if so, we may not be positioned at the end of the file.
255                         mylogfile.seek(0, 2)
256                         mylogfile.write(str(time.time())[:10]+": "+mystr+"\n")
257                         mylogfile.flush()
258                 finally:
259                         if mylock:
260                                 portage.locks.unlockfile(mylock)
261                         mylogfile.close()
262         except (IOError,OSError,portage.exception.PortageException), e:
263                 if secpass >= 1:
264                         print >> sys.stderr, "emergelog():",e
265
266 def countdown(secs=5, doing="Starting"):
267         if secs:
268                 print ">>> Waiting",secs,"seconds before starting..."
269                 print ">>> (Control-C to abort)...\n"+doing+" in: ",
270                 ticks=range(secs)
271                 ticks.reverse()
272                 for sec in ticks:
273                         sys.stdout.write(colorize("UNMERGE_WARN", str(sec+1)+" "))
274                         sys.stdout.flush()
275                         time.sleep(1)
276                 print
277
278 # formats a size given in bytes nicely
279 def format_size(mysize):
280         if isinstance(mysize, basestring):
281                 return mysize
282         if 0 != mysize % 1024:
283                 # Always round up to the next kB so that it doesn't show 0 kB when
284                 # some small file still needs to be fetched.
285                 mysize += 1024 - mysize % 1024
286         mystr=str(mysize/1024)
287         mycount=len(mystr)
288         while (mycount > 3):
289                 mycount-=3
290                 mystr=mystr[:mycount]+","+mystr[mycount:]
291         return mystr+" kB"
292
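
# Worked examples (illustration only) of format_size(): sizes are rounded up
# to the next whole kB so a partially fetched file never shows as "0 kB", and
# a comma is inserted every three digits.
def _example_format_size():
        assert format_size(1) == "1 kB"            # 1 byte rounds up to 1 kB
        assert format_size(2500000) == "2,442 kB"  # 2442 kB after rounding up
        assert format_size(10485760) == "10,240 kB"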
293
294 def getgccversion(chost):
295         """
296         rtype: C{str}
297         return:  the current in-use gcc version
298         """
299
300         gcc_ver_command = 'gcc -dumpversion'
301         gcc_ver_prefix = 'gcc-'
302
303         gcc_not_found_error = red(
304         "!!! No gcc found. You probably need to 'source /etc/profile'\n" +
305         "!!! to update the environment of this terminal and possibly\n" +
306         "!!! other terminals also.\n"
307         )
308
309         mystatus, myoutput = commands.getstatusoutput("gcc-config -c")
310         if mystatus == os.EX_OK and myoutput.startswith(chost + "-"):
311                 return myoutput.replace(chost + "-", gcc_ver_prefix, 1)
312
313         mystatus, myoutput = commands.getstatusoutput(
314                 chost + "-" + gcc_ver_command)
315         if mystatus == os.EX_OK:
316                 return gcc_ver_prefix + myoutput
317
318         mystatus, myoutput = commands.getstatusoutput(gcc_ver_command)
319         if mystatus == os.EX_OK:
320                 return gcc_ver_prefix + myoutput
321
322         portage.writemsg(gcc_not_found_error, noiselevel=-1)
323         return "[unavailable]"
324
325 def getportageversion(portdir, target_root, profile, chost, vardb):
326         profilever = "unavailable"
327         if profile:
328                 realpath = os.path.realpath(profile)
329                 basepath   = os.path.realpath(os.path.join(portdir, "profiles"))
330                 if realpath.startswith(basepath):
331                         profilever = realpath[1 + len(basepath):]
332                 else:
333                         try:
334                                 profilever = "!" + os.readlink(profile)
335                         except (OSError):
336                                 pass
337                 del realpath, basepath
338
339         libcver=[]
340         libclist  = vardb.match("virtual/libc")
341         libclist += vardb.match("virtual/glibc")
342         libclist  = portage.util.unique_array(libclist)
343         for x in libclist:
344                 xs=portage.catpkgsplit(x)
345                 if libcver:
346                         libcver+=","+"-".join(xs[1:])
347                 else:
348                         libcver="-".join(xs[1:])
349         if libcver==[]:
350                 libcver="unavailable"
351
352         gccver = getgccversion(chost)
353         unameout=platform.release()+" "+platform.machine()
354
355         return "Portage " + portage.VERSION +" ("+profilever+", "+gccver+", "+libcver+", "+unameout+")"
356
357 def create_depgraph_params(myopts, myaction):
358         #configure emerge engine parameters
359         #
360         # self:      include _this_ package regardless of whether it is merged.
361         # selective: exclude the package if it is merged
362         # recurse:   go into the dependencies
363         # deep:      go into the dependencies of already merged packages
364         # empty:     pretend nothing is merged
365         # complete:  completely account for all known dependencies
366         # remove:    build graph for use in removing packages
367         myparams = set(["recurse"])
368
369         if myaction == "remove":
370                 myparams.add("remove")
371                 myparams.add("complete")
372                 return myparams
373
374         if "--update" in myopts or \
375                 "--newuse" in myopts or \
376                 "--reinstall" in myopts or \
377                 "--noreplace" in myopts:
378                 myparams.add("selective")
379         if "--emptytree" in myopts:
380                 myparams.add("empty")
381                 myparams.discard("selective")
382         if "--nodeps" in myopts:
383                 myparams.discard("recurse")
384         if "--deep" in myopts:
385                 myparams.add("deep")
386         if "--complete-graph" in myopts:
387                 myparams.add("complete")
388         return myparams
389
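
# Illustrative call (not from the original file): with a hypothetical myopts
# mapping for "emerge --update --deep", the default "recurse" parameter is
# kept and "selective" and "deep" are added.
def _example_depgraph_params():
        myopts = {"--update": True, "--deep": True}
        assert create_depgraph_params(myopts, "") == \
                set(["recurse", "selective", "deep"])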
390 # search functionality
391 class search(object):
392
393         #
394         # class constants
395         #
396         VERSION_SHORT=1
397         VERSION_RELEASE=2
398
399         #
400         # public interface
401         #
402         def __init__(self, root_config, spinner, searchdesc,
403                 verbose, usepkg, usepkgonly):
404                 """Searches the available and installed packages for the supplied search key.
405                 The list of available and installed packages is created at object instantiation.
406                 This makes successive searches faster."""
407                 self.settings = root_config.settings
408                 self.vartree = root_config.trees["vartree"]
409                 self.spinner = spinner
410                 self.verbose = verbose
411                 self.searchdesc = searchdesc
412                 self.root_config = root_config
413                 self.setconfig = root_config.setconfig
414                 self.matches = {"pkg" : []}
415                 self.mlen = 0
416
417                 def fake_portdb():
418                         pass
419                 self.portdb = fake_portdb
420                 for attrib in ("aux_get", "cp_all",
421                         "xmatch", "findname", "getFetchMap"):
422                         setattr(fake_portdb, attrib, getattr(self, "_"+attrib))
423
424                 self._dbs = []
425
426                 portdb = root_config.trees["porttree"].dbapi
427                 bindb = root_config.trees["bintree"].dbapi
428                 vardb = root_config.trees["vartree"].dbapi
429
430                 if not usepkgonly and portdb._have_root_eclass_dir:
431                         self._dbs.append(portdb)
432
433                 if (usepkg or usepkgonly) and bindb.cp_all():
434                         self._dbs.append(bindb)
435
436                 self._dbs.append(vardb)
437                 self._portdb = portdb
438
439         def _cp_all(self):
440                 cp_all = set()
441                 for db in self._dbs:
442                         cp_all.update(db.cp_all())
443                 return list(sorted(cp_all))
444
445         def _aux_get(self, *args, **kwargs):
446                 for db in self._dbs:
447                         try:
448                                 return db.aux_get(*args, **kwargs)
449                         except KeyError:
450                                 pass
451                 raise
452
453         def _findname(self, *args, **kwargs):
454                 for db in self._dbs:
455                         if db is not self._portdb:
456                                 # We don't want findname to return anything
457                                 # unless it's an ebuild in a portage tree.
458                                 # Otherwise, it's already built and we don't
459                                 # care about it.
460                                 continue
461                         func = getattr(db, "findname", None)
462                         if func:
463                                 value = func(*args, **kwargs)
464                                 if value:
465                                         return value
466                 return None
467
468         def _getFetchMap(self, *args, **kwargs):
469                 for db in self._dbs:
470                         func = getattr(db, "getFetchMap", None)
471                         if func:
472                                 value = func(*args, **kwargs)
473                                 if value:
474                                         return value
475                 return {}
476
477         def _visible(self, db, cpv, metadata):
478                 installed = db is self.vartree.dbapi
479                 built = installed or db is not self._portdb
480                 pkg_type = "ebuild"
481                 if installed:
482                         pkg_type = "installed"
483                 elif built:
484                         pkg_type = "binary"
485                 return visible(self.settings,
486                         Package(type_name=pkg_type, root_config=self.root_config,
487                         cpv=cpv, built=built, installed=installed, metadata=metadata))
488
489         def _xmatch(self, level, atom):
490                 """
491                 This method does not expand old-style virtuals because it
492                 is restricted to returning matches for a single ${CATEGORY}/${PN}
493                 and old-style virtual matches are unreliable for that when querying
494                 multiple package databases. If necessary, old-style virtual expansion
495                 can be performed on atoms prior to calling this method.
496                 """
497                 cp = portage.dep_getkey(atom)
498                 if level == "match-all":
499                         matches = set()
500                         for db in self._dbs:
501                                 if hasattr(db, "xmatch"):
502                                         matches.update(db.xmatch(level, atom))
503                                 else:
504                                         matches.update(db.match(atom))
505                         result = list(x for x in matches if portage.cpv_getkey(x) == cp)
506                         db._cpv_sort_ascending(result)
507                 elif level == "match-visible":
508                         matches = set()
509                         for db in self._dbs:
510                                 if hasattr(db, "xmatch"):
511                                         matches.update(db.xmatch(level, atom))
512                                 else:
513                                         db_keys = list(db._aux_cache_keys)
514                                         for cpv in db.match(atom):
515                                                 metadata = izip(db_keys,
516                                                         db.aux_get(cpv, db_keys))
517                                                 if not self._visible(db, cpv, metadata):
518                                                         continue
519                                                 matches.add(cpv)
520                         result = list(x for x in matches if portage.cpv_getkey(x) == cp)
521                         db._cpv_sort_ascending(result)
522                 elif level == "bestmatch-visible":
523                         result = None
524                         for db in self._dbs:
525                                 if hasattr(db, "xmatch"):
526                                         cpv = db.xmatch("bestmatch-visible", atom)
527                                         if not cpv or portage.cpv_getkey(cpv) != cp:
528                                                 continue
529                                         if not result or cpv == portage.best([cpv, result]):
530                                                 result = cpv
531                                 else:
532                                         db_keys = Package.metadata_keys
533                                         # break out of this loop with highest visible
534                                         # match, checked in descending order
535                                         for cpv in reversed(db.match(atom)):
536                                                 if portage.cpv_getkey(cpv) != cp:
537                                                         continue
538                                                 metadata = izip(db_keys,
539                                                         db.aux_get(cpv, db_keys))
540                                                 if not self._visible(db, cpv, metadata):
541                                                         continue
542                                                 if not result or cpv == portage.best([cpv, result]):
543                                                         result = cpv
544                                                 break
545                 else:
546                         raise NotImplementedError(level)
547                 return result
548
549         def execute(self,searchkey):
550                 """Performs the search for the supplied search key"""
551                 match_category = 0
552                 self.searchkey=searchkey
553                 self.packagematches = []
554                 if self.searchdesc:
555                         self.searchdesc=1
556                         self.matches = {"pkg":[], "desc":[], "set":[]}
557                 else:
558                         self.searchdesc=0
559                         self.matches = {"pkg":[], "set":[]}
560                 print "Searching...   ",
561
562                 regexsearch = False
563                 if self.searchkey.startswith('%'):
564                         regexsearch = True
565                         self.searchkey = self.searchkey[1:]
566                 if self.searchkey.startswith('@'):
567                         match_category = 1
568                         self.searchkey = self.searchkey[1:]
569                 if regexsearch:
570                         self.searchre=re.compile(self.searchkey,re.I)
571                 else:
572                         self.searchre=re.compile(re.escape(self.searchkey), re.I)
573                 for package in self.portdb.cp_all():
574                         self.spinner.update()
575
576                         if match_category:
577                                 match_string  = package[:]
578                         else:
579                                 match_string  = package.split("/")[-1]
580
581                         masked=0
582                         if self.searchre.search(match_string):
583                                 if not self.portdb.xmatch("match-visible", package):
584                                         masked=1
585                                 self.matches["pkg"].append([package,masked])
586                         elif self.searchdesc: # DESCRIPTION searching
587                                 full_package = self.portdb.xmatch("bestmatch-visible", package)
588                                 if not full_package:
589                                         #no match found; we don't want to query description
590                                         full_package = portage.best(
591                                                 self.portdb.xmatch("match-all", package))
592                                         if not full_package:
593                                                 continue
594                                         else:
595                                                 masked=1
596                                 try:
597                                         full_desc = self.portdb.aux_get(
598                                                 full_package, ["DESCRIPTION"])[0]
599                                 except KeyError:
600                                         print "emerge: search: aux_get() failed, skipping"
601                                         continue
602                                 if self.searchre.search(full_desc):
603                                         self.matches["desc"].append([full_package,masked])
604
605                 self.sdict = self.setconfig.getSets()
606                 for setname in self.sdict:
607                         self.spinner.update()
608                         if match_category:
609                                 match_string = setname
610                         else:
611                                 match_string = setname.split("/")[-1]
612                         
613                         if self.searchre.search(match_string):
614                                 self.matches["set"].append([setname, False])
615                         elif self.searchdesc:
616                                 if self.searchre.search(
617                                         self.sdict[setname].getMetadata("DESCRIPTION")):
618                                         self.matches["set"].append([setname, False])
619                         
620                 self.mlen=0
621                 for mtype in self.matches:
622                         self.matches[mtype].sort()
623                         self.mlen += len(self.matches[mtype])
624
625         def addCP(self, cp):
626                 if not self.portdb.xmatch("match-all", cp):
627                         return
628                 masked = 0
629                 if not self.portdb.xmatch("bestmatch-visible", cp):
630                         masked = 1
631                 self.matches["pkg"].append([cp, masked])
632                 self.mlen += 1
633
634         def output(self):
635                 """Outputs the results of the search."""
636                 print "\b\b  \n[ Results for search key : "+white(self.searchkey)+" ]"
637                 print "[ Applications found : "+white(str(self.mlen))+" ]"
638                 print " "
639                 vardb = self.vartree.dbapi
640                 for mtype in self.matches:
641                         for match,masked in self.matches[mtype]:
642                                 full_package = None
643                                 if mtype == "pkg":
644                                         catpack = match
645                                         full_package = self.portdb.xmatch(
646                                                 "bestmatch-visible", match)
647                                         if not full_package:
648                                                 #no match found; we don't want to query description
649                                                 masked=1
650                                                 full_package = portage.best(
651                                                         self.portdb.xmatch("match-all",match))
652                                 elif mtype == "desc":
653                                         full_package = match
654                                         match        = portage.cpv_getkey(match)
655                                 elif mtype == "set":
656                                         print green("*")+"  "+white(match)
657                                         print "     ", darkgreen("Description:")+"  ", self.sdict[match].getMetadata("DESCRIPTION")
658                                         print
659                                 if full_package:
660                                         try:
661                                                 desc, homepage, license = self.portdb.aux_get(
662                                                         full_package, ["DESCRIPTION","HOMEPAGE","LICENSE"])
663                                         except KeyError:
664                                                 print "emerge: search: aux_get() failed, skipping"
665                                                 continue
666                                         if masked:
667                                                 print green("*")+"  "+white(match)+" "+red("[ Masked ]")
668                                         else:
669                                                 print green("*")+"  "+white(match)
670                                         myversion = self.getVersion(full_package, search.VERSION_RELEASE)
671
672                                         mysum = [0,0]
673                                         file_size_str = None
674                                         mycat = match.split("/")[0]
675                                         mypkg = match.split("/")[1]
676                                         mycpv = match + "-" + myversion
677                                         myebuild = self.portdb.findname(mycpv)
678                                         if myebuild:
679                                                 pkgdir = os.path.dirname(myebuild)
680                                                 from portage import manifest
681                                                 mf = manifest.Manifest(
682                                                         pkgdir, self.settings["DISTDIR"])
683                                                 try:
684                                                         uri_map = self.portdb.getFetchMap(mycpv)
685                                                 except portage.exception.InvalidDependString, e:
686                                                         file_size_str = "Unknown (%s)" % (e,)
687                                                         del e
688                                                 else:
689                                                         try:
690                                                                 mysum[0] = mf.getDistfilesSize(uri_map)
691                                                         except KeyError, e:
692                                                                 file_size_str = "Unknown (missing " + \
693                                                                         "digest for %s)" % (e,)
694                                                                 del e
695
696                                         available = False
697                                         for db in self._dbs:
698                                                 if db is not vardb and \
699                                                         db.cpv_exists(mycpv):
700                                                         available = True
701                                                         if not myebuild and hasattr(db, "bintree"):
702                                                                 myebuild = db.bintree.getname(mycpv)
703                                                                 try:
704                                                                         mysum[0] = os.stat(myebuild).st_size
705                                                                 except OSError:
706                                                                         myebuild = None
707                                                         break
708
709                                         if myebuild and file_size_str is None:
710                                                 mystr = str(mysum[0] / 1024)
711                                                 mycount = len(mystr)
712                                                 while (mycount > 3):
713                                                         mycount -= 3
714                                                         mystr = mystr[:mycount] + "," + mystr[mycount:]
715                                                 file_size_str = mystr + " kB"
716
717                                         if self.verbose:
718                                                 if available:
719                                                         print "     ", darkgreen("Latest version available:"),myversion
720                                                 print "     ", self.getInstallationStatus(mycat+'/'+mypkg)
721                                                 if myebuild:
722                                                         print "      %s %s" % \
723                                                                 (darkgreen("Size of files:"), file_size_str)
724                                                 print "     ", darkgreen("Homepage:")+"     ",homepage
725                                                 print "     ", darkgreen("Description:")+"  ",desc
726                                                 print "     ", darkgreen("License:")+"      ",license
727                                                 print
728         #
729         # private interface
730         #
731         def getInstallationStatus(self,package):
732                 installed_package = self.vartree.dep_bestmatch(package)
733                 result = ""
734                 version = self.getVersion(installed_package,search.VERSION_RELEASE)
735                 if len(version) > 0:
736                         result = darkgreen("Latest version installed:")+" "+version
737                 else:
738                         result = darkgreen("Latest version installed:")+" [ Not Installed ]"
739                 return result
740
741         def getVersion(self,full_package,detail):
742                 if len(full_package) > 1:
743                         package_parts = portage.catpkgsplit(full_package)
744                         if detail == search.VERSION_RELEASE and package_parts[3] != 'r0':
745                                 result = package_parts[2]+ "-" + package_parts[3]
746                         else:
747                                 result = package_parts[2]
748                 else:
749                         result = ""
750                 return result
751
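
# Hedged usage sketch (illustration only): a search key starting with "%" is
# treated as a regular expression and one starting with "@" is matched against
# the full category/package name.  root_config and spinner are assumptions
# here, normally supplied by emerge's action handling.
def _example_search(root_config, spinner):
        s = search(root_config, spinner, searchdesc=False,
                verbose=False, usepkg=False, usepkgonly=False)
        s.execute("%^gcc")      # regex search against package names
        s.output()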
752 class RootConfig(object):
753         """This is used internally by depgraph to track information about a
754         particular $ROOT."""
755
756         pkg_tree_map = {
757                 "ebuild"    : "porttree",
758                 "binary"    : "bintree",
759                 "installed" : "vartree"
760         }
761
762         tree_pkg_map = {}
763         for k, v in pkg_tree_map.iteritems():
764                 tree_pkg_map[v] = k
765
766         def __init__(self, settings, trees, setconfig):
767                 self.trees = trees
768                 self.settings = settings
769                 self.iuse_implicit = tuple(sorted(settings._get_implicit_iuse()))
770                 self.root = self.settings["ROOT"]
771                 self.setconfig = setconfig
772                 self.sets = self.setconfig.getSets()
773                 self.visible_pkgs = PackageVirtualDbapi(self.settings)
774
775 def create_world_atom(pkg, args_set, root_config):
776         """Create a new atom for the world file if one does not exist.  If the
777         argument atom is precise enough to identify a specific slot then a slot
778         atom will be returned. Atoms that are in the system set may also be stored
779         in world since system atoms can only match one slot while world atoms can
780         be greedy with respect to slots.  Unslotted system packages will not be
781         stored in world."""
782
783         arg_atom = args_set.findAtomForPackage(pkg)
784         if not arg_atom:
785                 return None
786         cp = portage.dep_getkey(arg_atom)
787         new_world_atom = cp
788         sets = root_config.sets
789         portdb = root_config.trees["porttree"].dbapi
790         vardb = root_config.trees["vartree"].dbapi
791         available_slots = set(portdb.aux_get(cpv, ["SLOT"])[0] \
792                 for cpv in portdb.match(cp))
793         slotted = len(available_slots) > 1 or \
794                 (len(available_slots) == 1 and "0" not in available_slots)
795         if not slotted:
796                 # check the vdb in case this is multislot
797                 available_slots = set(vardb.aux_get(cpv, ["SLOT"])[0] \
798                         for cpv in vardb.match(cp))
799                 slotted = len(available_slots) > 1 or \
800                         (len(available_slots) == 1 and "0" not in available_slots)
801         if slotted and arg_atom != cp:
802                 # If the user gave a specific atom, store it as a
803                 # slot atom in the world file.
804                 slot_atom = pkg.slot_atom
805
806                 # For USE=multislot, there are a couple of cases to
807                 # handle here:
808                 #
809                 # 1) SLOT="0", but the real SLOT spontaneously changed to some
810                 #    unknown value, so just record an unslotted atom.
811                 #
812                 # 2) SLOT comes from an installed package and there is no
813                 #    matching SLOT in the portage tree.
814                 #
815                 # Make sure that the slot atom is available in either the
816                 # portdb or the vardb, since otherwise the user certainly
817                 # doesn't want the SLOT atom recorded in the world file
818                 # (case 1 above).  If it's only available in the vardb,
819                 # the user may be trying to prevent a USE=multislot
820                 # package from being removed by --depclean (case 2 above).
821
822                 mydb = portdb
823                 if not portdb.match(slot_atom):
824                         # SLOT seems to come from an installed multislot package
825                         mydb = vardb
826                 # If there is no installed package matching the SLOT atom,
827                 # it probably changed SLOT spontaneously due to USE=multislot,
828                 # so just record an unslotted atom.
829                 if vardb.match(slot_atom):
830                         # Now verify that the argument is precise
831                         # enough to identify a specific slot.
832                         matches = mydb.match(arg_atom)
833                         matched_slots = set()
834                         for cpv in matches:
835                                 matched_slots.add(mydb.aux_get(cpv, ["SLOT"])[0])
836                         if len(matched_slots) == 1:
837                                 new_world_atom = slot_atom
838
839         if new_world_atom == sets["world"].findAtomForPackage(pkg):
840                 # Both atoms would be identical, so there's nothing to add.
841                 return None
842         if not slotted:
843                 # Unlike world atoms, system atoms are not greedy for slots, so they
844                 # can't be safely excluded from world if they are slotted.
845                 system_atom = sets["system"].findAtomForPackage(pkg)
846                 if system_atom:
847                         if not portage.dep_getkey(system_atom).startswith("virtual/"):
848                                 return None
849                         # System virtuals aren't safe to exclude from world since they can
850                         # match multiple old-style virtuals but only one of them will be
851                         # pulled in by update or depclean.
852                         providers = portdb.mysettings.getvirtuals().get(
853                                 portage.dep_getkey(system_atom))
854                         if providers and len(providers) == 1 and providers[0] == cp:
855                                 return None
856         return new_world_atom
857
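
# Small sketch (illustration only) restating the "slotted" test used above: a
# package name is treated as slotted when its available SLOT values contain
# more than one slot, or a single slot other than "0".  The inputs below are
# hypothetical SLOT sets.
def _example_is_slotted(available_slots):
        return len(available_slots) > 1 or \
                (len(available_slots) == 1 and "0" not in available_slots)
# _example_is_slotted(set(["0"]))      -> False
# _example_is_slotted(set(["1.5"]))    -> True
# _example_is_slotted(set(["0", "2"])) -> True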
858 def filter_iuse_defaults(iuse):
859         for flag in iuse:
860                 if flag.startswith("+") or flag.startswith("-"):
861                         yield flag[1:]
862                 else:
863                         yield flag
864
865 class SlotObject(object):
866         __slots__ = ("__weakref__",)
867
868         def __init__(self, **kwargs):
869                 classes = [self.__class__]
870                 while classes:
871                         c = classes.pop()
872                         if c is SlotObject:
873                                 continue
874                         classes.extend(c.__bases__)
875                         slots = getattr(c, "__slots__", None)
876                         if not slots:
877                                 continue
878                         for myattr in slots:
879                                 myvalue = kwargs.get(myattr, None)
880                                 setattr(self, myattr, myvalue)
881
882         def copy(self):
883                 """
884                 Create a new instance and copy all attributes
885                 defined from __slots__ (including those from
886                 inherited classes).
887                 """
888                 obj = self.__class__()
889
890                 classes = [self.__class__]
891                 while classes:
892                         c = classes.pop()
893                         if c is SlotObject:
894                                 continue
895                         classes.extend(c.__bases__)
896                         slots = getattr(c, "__slots__", None)
897                         if not slots:
898                                 continue
899                         for myattr in slots:
900                                 setattr(obj, myattr, getattr(self, myattr))
901
902                 return obj
903
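
# Hedged example (not part of the original file): SlotObject subclasses only
# declare __slots__; the inherited __init__ then accepts those names as
# keyword arguments and copy() duplicates every slot value.  _ExamplePoint is
# a hypothetical subclass used purely for illustration.
class _ExamplePoint(SlotObject):
        __slots__ = ("x", "y")
# p = _ExamplePoint(x=1, y=2)  ->  p.x == 1, p.y == 2
# q = p.copy()                 ->  a new instance with the same slot values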
904 class AbstractDepPriority(SlotObject):
905         __slots__ = ("buildtime", "runtime", "runtime_post")
906
907         def __lt__(self, other):
908                 return self.__int__() < other
909
910         def __le__(self, other):
911                 return self.__int__() <= other
912
913         def __eq__(self, other):
914                 return self.__int__() == other
915
916         def __ne__(self, other):
917                 return self.__int__() != other
918
919         def __gt__(self, other):
920                 return self.__int__() > other
921
922         def __ge__(self, other):
923                 return self.__int__() >= other
924
925         def copy(self):
926                 import copy
927                 return copy.copy(self)
928
929 class DepPriority(AbstractDepPriority):
930
931         __slots__ = ("satisfied", "optional", "rebuild")
932
933         def __int__(self):
934                 return 0
935
936         def __str__(self):
937                 if self.optional:
938                         return "optional"
939                 if self.buildtime:
940                         return "buildtime"
941                 if self.runtime:
942                         return "runtime"
943                 if self.runtime_post:
944                         return "runtime_post"
945                 return "soft"
946
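
# Illustration (not from the original file): DepPriority compares as an
# integer through AbstractDepPriority, while __str__ reports the strongest
# property that is set.
def _example_dep_priority():
        p = DepPriority(buildtime=True)
        assert str(p) == "buildtime"
        assert int(p) == 0 and p <= 0
        assert str(DepPriority(optional=True)) == "optional"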
947 class BlockerDepPriority(DepPriority):
948         __slots__ = ()
949         def __int__(self):
950                 return 0
951
952         def __str__(self):
953                 return 'blocker'
954
955 BlockerDepPriority.instance = BlockerDepPriority()
956
957 class UnmergeDepPriority(AbstractDepPriority):
958         __slots__ = ("optional", "satisfied",)
959         """
960         Combination of properties           Priority  Category
961
962         runtime                                0       HARD
963         runtime_post                          -1       HARD
964         buildtime                             -2       SOFT
965         (none of the above)                   -2       SOFT
966         """
967
968         MAX    =  0
969         SOFT   = -2
970         MIN    = -2
971
972         def __int__(self):
973                 if self.runtime:
974                         return 0
975                 if self.runtime_post:
976                         return -1
977                 if self.buildtime:
978                         return -2
979                 return -2
980
981         def __str__(self):
982                 myvalue = self.__int__()
983                 if myvalue > self.SOFT:
984                         return "hard"
985                 return "soft"
986
987 class DepPriorityNormalRange(object):
988         """
989         DepPriority properties              Index      Category
990
991         buildtime                                      HARD
992         runtime                                3       MEDIUM
993         runtime_post                           2       MEDIUM_SOFT
994         optional                               1       SOFT
995         (none of the above)                    0       NONE
996         """
997         MEDIUM      = 3
998         MEDIUM_SOFT = 2
999         SOFT        = 1
1000         NONE        = 0
1001
1002         @classmethod
1003         def _ignore_optional(cls, priority):
1004                 if priority.__class__ is not DepPriority:
1005                         return False
1006                 return bool(priority.optional)
1007
1008         @classmethod
1009         def _ignore_runtime_post(cls, priority):
1010                 if priority.__class__ is not DepPriority:
1011                         return False
1012                 return bool(priority.optional or priority.runtime_post)
1013
1014         @classmethod
1015         def _ignore_runtime(cls, priority):
1016                 if priority.__class__ is not DepPriority:
1017                         return False
1018                 return not priority.buildtime
1019
1020         ignore_medium      = _ignore_runtime
1021         ignore_medium_soft = _ignore_runtime_post
1022         ignore_soft        = _ignore_optional
1023
1024 DepPriorityNormalRange.ignore_priority = (
1025         None,
1026         DepPriorityNormalRange._ignore_optional,
1027         DepPriorityNormalRange._ignore_runtime_post,
1028         DepPriorityNormalRange._ignore_runtime
1029 )
1030
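
# Illustration (not from the original file): ignore_priority is indexed by how
# far the resolver is willing to relax the graph; each entry except None is a
# filter saying which dependency edges may be ignored at that level.
def _example_ignore_priority():
        soft = DepPriority(optional=True)
        hard = DepPriority(buildtime=True)
        ignore = DepPriorityNormalRange.ignore_priority[DepPriorityNormalRange.SOFT]
        assert ignore(soft) and not ignore(hard)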
1031 class DepPrioritySatisfiedRange(object):
1032         """
1033         DepPriority                         Index      Category
1034
1035         not satisfied and buildtime                    HARD
1036         not satisfied and runtime              7       MEDIUM
1037         not satisfied and runtime_post         6       MEDIUM_SOFT
1038         satisfied and buildtime and rebuild    5       SOFT
1039         satisfied and buildtime                4       SOFT
1040         satisfied and runtime                  3       SOFT
1041         satisfied and runtime_post             2       SOFT
1042         optional                               1       SOFT
1043         (none of the above)                    0       NONE
1044         """
1045         MEDIUM      = 7
1046         MEDIUM_SOFT = 6
1047         SOFT        = 5
1048         NONE        = 0
1049
1050         @classmethod
1051         def _ignore_optional(cls, priority):
1052                 if priority.__class__ is not DepPriority:
1053                         return False
1054                 return bool(priority.optional)
1055
1056         @classmethod
1057         def _ignore_satisfied_runtime_post(cls, priority):
1058                 if priority.__class__ is not DepPriority:
1059                         return False
1060                 if priority.optional:
1061                         return True
1062                 if not priority.satisfied:
1063                         return False
1064                 return bool(priority.runtime_post)
1065
1066         @classmethod
1067         def _ignore_satisfied_runtime(cls, priority):
1068                 if priority.__class__ is not DepPriority:
1069                         return False
1070                 if priority.optional:
1071                         return True
1072                 if not priority.satisfied:
1073                         return False
1074                 return not priority.buildtime
1075
1076         @classmethod
1077         def _ignore_satisfied_buildtime(cls, priority):
1078                 if priority.__class__ is not DepPriority:
1079                         return False
1080                 if priority.optional:
1081                         return True
1082                 if not priority.satisfied:
1083                         return False
1084                 if priority.buildtime:
1085                         return not priority.rebuild
1086                 return True
1087
1088         @classmethod
1089         def _ignore_satisfied_buildtime_rebuild(cls, priority):
1090                 if priority.__class__ is not DepPriority:
1091                         return False
1092                 if priority.optional:
1093                         return True
1094                 return bool(priority.satisfied)
1095
1096         @classmethod
1097         def _ignore_runtime_post(cls, priority):
1098                 if priority.__class__ is not DepPriority:
1099                         return False
1100                 return bool(priority.optional or \
1101                         priority.satisfied or \
1102                         priority.runtime_post)
1103
1104         @classmethod
1105         def _ignore_runtime(cls, priority):
1106                 if priority.__class__ is not DepPriority:
1107                         return False
1108                 return bool(priority.satisfied or \
1109                         not priority.buildtime)
1110
1111         ignore_medium      = _ignore_runtime
1112         ignore_medium_soft = _ignore_runtime_post
1113         ignore_soft        = _ignore_satisfied_buildtime_rebuild
1114
1115 DepPrioritySatisfiedRange.ignore_priority = (
1116         None,
1117         DepPrioritySatisfiedRange._ignore_optional,
1118         DepPrioritySatisfiedRange._ignore_satisfied_runtime_post,
1119         DepPrioritySatisfiedRange._ignore_satisfied_runtime,
1120         DepPrioritySatisfiedRange._ignore_satisfied_buildtime,
1121         DepPrioritySatisfiedRange._ignore_satisfied_buildtime_rebuild,
1122         DepPrioritySatisfiedRange._ignore_runtime_post,
1123         DepPrioritySatisfiedRange._ignore_runtime
1124 )
1125
1126 def _find_deep_system_runtime_deps(graph):
1127         deep_system_deps = set()
1128         node_stack = []
1129         for node in graph:
1130                 if not isinstance(node, Package) or \
1131                         node.operation == 'uninstall':
1132                         continue
1133                 if node.root_config.sets['system'].findAtomForPackage(node):
1134                         node_stack.append(node)
1135
1136         def ignore_priority(priority):
1137                 """
1138                 Ignore non-runtime priorities.
1139                 """
1140                 if isinstance(priority, DepPriority) and \
1141                         (priority.runtime or priority.runtime_post):
1142                         return False
1143                 return True
1144
1145         while node_stack:
1146                 node = node_stack.pop()
1147                 if node in deep_system_deps:
1148                         continue
1149                 deep_system_deps.add(node)
1150                 for child in graph.child_nodes(node, ignore_priority=ignore_priority):
1151                         if not isinstance(child, Package) or \
1152                                 child.operation == 'uninstall':
1153                                 continue
1154                         node_stack.append(child)
1155
1156         return deep_system_deps
1157
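
# Hedged sketch (illustration only): the helper above walks the dependency
# graph from system packages following runtime edges only.  This restates the
# edge filter with hypothetical arguments, assuming digraph.child_nodes()
# accepts an ignore_priority callable as used in the function above.
def _example_runtime_children(graph, pkg):
        def _ignore_non_runtime(priority):
                return not (isinstance(priority, DepPriority) and
                        (priority.runtime or priority.runtime_post))
        return graph.child_nodes(pkg, ignore_priority=_ignore_non_runtime)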
1158 class FakeVartree(portage.vartree):
1159         """This implements an in-memory copy of a vartree instance that provides
1160         all the interfaces required for use by the depgraph.  The vardb is locked
1161         during the constructor call just long enough to read a copy of the
1162         installed package information.  This allows the depgraph to do its
1163         dependency calculations without holding a lock on the vardb.  It also
1164         allows things like vardb global updates to be done in memory so that the
1165         user doesn't necessarily need write access to the vardb in cases where
1166         global updates are necessary (updates are performed when necessary if there
1167         is not a matching ebuild in the tree)."""
1168         def __init__(self, root_config, pkg_cache=None, acquire_lock=1):
1169                 self._root_config = root_config
1170                 if pkg_cache is None:
1171                         pkg_cache = {}
1172                 real_vartree = root_config.trees["vartree"]
1173                 portdb = root_config.trees["porttree"].dbapi
1174                 self.root = real_vartree.root
1175                 self.settings = real_vartree.settings
1176                 mykeys = list(real_vartree.dbapi._aux_cache_keys)
1177                 if "_mtime_" not in mykeys:
1178                         mykeys.append("_mtime_")
1179                 self._db_keys = mykeys
1180                 self._pkg_cache = pkg_cache
1181                 self.dbapi = PackageVirtualDbapi(real_vartree.settings)
1182                 vdb_path = os.path.join(self.root, portage.VDB_PATH)
1183                 try:
1184                         # At least the parent needs to exist for the lock file.
1185                         portage.util.ensure_dirs(vdb_path)
1186                 except portage.exception.PortageException:
1187                         pass
1188                 vdb_lock = None
1189                 try:
1190                         if acquire_lock and os.access(vdb_path, os.W_OK):
1191                                 vdb_lock = portage.locks.lockdir(vdb_path)
1192                         real_dbapi = real_vartree.dbapi
1193                         slot_counters = {}
1194                         for cpv in real_dbapi.cpv_all():
1195                                 cache_key = ("installed", self.root, cpv, "nomerge")
1196                                 pkg = self._pkg_cache.get(cache_key)
1197                                 if pkg is not None:
1198                                         metadata = pkg.metadata
1199                                 else:
1200                                         metadata = dict(izip(mykeys, real_dbapi.aux_get(cpv, mykeys)))
1201                                 myslot = metadata["SLOT"]
1202                                 mycp = portage.dep_getkey(cpv)
1203                                 myslot_atom = "%s:%s" % (mycp, myslot)
1204                                 try:
1205                                         mycounter = long(metadata["COUNTER"])
1206                                 except ValueError:
1207                                         mycounter = 0
1208                                         metadata["COUNTER"] = str(mycounter)
1209                                 other_counter = slot_counters.get(myslot_atom, None)
1210                                 if other_counter is not None:
1211                                         if other_counter > mycounter:
1212                                                 continue
1213                                 slot_counters[myslot_atom] = mycounter
1214                                 if pkg is None:
1215                                         pkg = Package(built=True, cpv=cpv,
1216                                                 installed=True, metadata=metadata,
1217                                                 root_config=root_config, type_name="installed")
1218                                 self._pkg_cache[pkg] = pkg
1219                                 self.dbapi.cpv_inject(pkg)
1220                         real_dbapi.flush_cache()
1221                 finally:
1222                         if vdb_lock:
1223                                 portage.locks.unlockdir(vdb_lock)
1224                 # Populate the old-style virtuals using the cached values.
1225                 if not self.settings.treeVirtuals:
1226                         self.settings.treeVirtuals = portage.util.map_dictlist_vals(
1227                                 portage.getCPFromCPV, self.get_all_provides())
1228
1229                 # Initialize variables needed for lazy cache pulls of the live ebuild
1230                 # metadata.  This ensures that the vardb lock is released ASAP, without
1231                 # being delayed in case cache generation is triggered.
1232                 self._aux_get = self.dbapi.aux_get
1233                 self.dbapi.aux_get = self._aux_get_wrapper
1234                 self._match = self.dbapi.match
1235                 self.dbapi.match = self._match_wrapper
1236                 self._aux_get_history = set()
1237                 self._portdb_keys = ["EAPI", "DEPEND", "RDEPEND", "PDEPEND"]
1238                 self._portdb = portdb
1239                 self._global_updates = None
1240
1241         def _match_wrapper(self, cpv, use_cache=1):
1242                 """
1243                 Make sure the metadata in Package instances gets updated for any
1244                 cpv that is returned from a match() call, since the metadata can
1245                 be accessed directly from the Package instance instead of via
1246                 aux_get().
1247                 """
1248                 matches = self._match(cpv, use_cache=use_cache)
1249                 for cpv in matches:
1250                         if cpv in self._aux_get_history:
1251                                 continue
1252                         self._aux_get_wrapper(cpv, [])
1253                 return matches
1254
1255         def _aux_get_wrapper(self, pkg, wants):
1256                 if pkg in self._aux_get_history:
1257                         return self._aux_get(pkg, wants)
1258                 self._aux_get_history.add(pkg)
1259                 try:
1260                         # Use the live ebuild metadata if possible.
1261                         live_metadata = dict(izip(self._portdb_keys,
1262                                 self._portdb.aux_get(pkg, self._portdb_keys)))
1263                         if not portage.eapi_is_supported(live_metadata["EAPI"]):
1264                                 raise KeyError(pkg)
1265                         self.dbapi.aux_update(pkg, live_metadata)
1266                 except (KeyError, portage.exception.PortageException):
1267                         if self._global_updates is None:
1268                                 self._global_updates = \
1269                                         grab_global_updates(self._portdb.porttree_root)
1270                         perform_global_updates(
1271                                 pkg, self.dbapi, self._global_updates)
1272                 return self._aux_get(pkg, wants)
1273
1274         def sync(self, acquire_lock=1):
1275                 """
1276                 Call this method to synchronize state with the real vardb
1277                 after one or more packages may have been installed or
1278                 uninstalled.
1279                 """
1280                 vdb_path = os.path.join(self.root, portage.VDB_PATH)
1281                 try:
1282                         # At least the parent needs to exist for the lock file.
1283                         portage.util.ensure_dirs(vdb_path)
1284                 except portage.exception.PortageException:
1285                         pass
1286                 vdb_lock = None
1287                 try:
1288                         if acquire_lock and os.access(vdb_path, os.W_OK):
1289                                 vdb_lock = portage.locks.lockdir(vdb_path)
1290                         self._sync()
1291                 finally:
1292                         if vdb_lock:
1293                                 portage.locks.unlockdir(vdb_lock)
1294
1295         def _sync(self):
1296
1297                 real_vardb = self._root_config.trees["vartree"].dbapi
1298                 current_cpv_set = frozenset(real_vardb.cpv_all())
1299                 pkg_vardb = self.dbapi
1300                 aux_get_history = self._aux_get_history
1301
1302                 # Remove any packages that have been uninstalled.
1303                 for pkg in list(pkg_vardb):
1304                         if pkg.cpv not in current_cpv_set:
1305                                 pkg_vardb.cpv_remove(pkg)
1306                                 aux_get_history.discard(pkg.cpv)
1307
1308                 # Validate counters and timestamps.
1309                 slot_counters = {}
1310                 root = self.root
1311                 validation_keys = ["COUNTER", "_mtime_"]
1312                 for cpv in current_cpv_set:
1313
1314                         pkg_hash_key = ("installed", root, cpv, "nomerge")
1315                         pkg = pkg_vardb.get(pkg_hash_key)
1316                         if pkg is not None:
1317                                 counter, mtime = real_vardb.aux_get(cpv, validation_keys)
1318                                 try:
1319                                         counter = long(counter)
1320                                 except ValueError:
1321                                         counter = 0
1322
1323                                 if counter != pkg.counter or \
1324                                         mtime != pkg.mtime:
1325                                         pkg_vardb.cpv_remove(pkg)
1326                                         aux_get_history.discard(pkg.cpv)
1327                                         pkg = None
1328
1329                         if pkg is None:
1330                                 pkg = self._pkg(cpv)
1331
1332                         other_counter = slot_counters.get(pkg.slot_atom)
1333                         if other_counter is not None:
1334                                 if other_counter > pkg.counter:
1335                                         continue
1336
1337                         slot_counters[pkg.slot_atom] = pkg.counter
1338                         pkg_vardb.cpv_inject(pkg)
1339
1340                 real_vardb.flush_cache()
1341
1342         def _pkg(self, cpv):
1343                 root_config = self._root_config
1344                 real_vardb = root_config.trees["vartree"].dbapi
1345                 pkg = Package(cpv=cpv, installed=True,
1346                         metadata=izip(self._db_keys,
1347                         real_vardb.aux_get(cpv, self._db_keys)),
1348                         root_config=root_config,
1349                         type_name="installed")
1350
1351                 try:
1352                         mycounter = long(pkg.metadata["COUNTER"])
1353                 except ValueError:
1354                         mycounter = 0
1355                         pkg.metadata["COUNTER"] = str(mycounter)
1356
1357                 return pkg
1358
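# Illustrative usage sketch (hypothetical caller; variable names are
# assumptions): a FakeVartree is built once from a root_config, queried
# through its dbapi like a normal vartree, and re-synchronized after
# packages have been merged or unmerged.
#
#       fake_vartree = FakeVartree(root_config)
#       installed_matches = fake_vartree.dbapi.match("sys-apps/portage")
#       # ... packages are merged/unmerged elsewhere ...
#       fake_vartree.sync()
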
1359 def grab_global_updates(portdir):
1360         from portage.update import grab_updates, parse_updates
1361         updpath = os.path.join(portdir, "profiles", "updates")
1362         try:
1363                 rawupdates = grab_updates(updpath)
1364         except portage.exception.DirectoryNotFound:
1365                 rawupdates = []
1366         upd_commands = []
1367         for mykey, mystat, mycontent in rawupdates:
1368                 commands, errors = parse_updates(mycontent)
1369                 upd_commands.extend(commands)
1370         return upd_commands
1371
1372 def perform_global_updates(mycpv, mydb, mycommands):
1373         from portage.update import update_dbentries
1374         aux_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
1375         aux_dict = dict(izip(aux_keys, mydb.aux_get(mycpv, aux_keys)))
1376         updates = update_dbentries(mycommands, aux_dict)
1377         if updates:
1378                 mydb.aux_update(mycpv, updates)
1379
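# Illustrative sketch (hypothetical caller): the two helpers above are meant
# to be used together -- grab_global_updates() reads profiles/updates once,
# and perform_global_updates() applies the resulting commands to the
# dependency metadata of a single cpv in a dbapi-like object.
#
#       upd_commands = grab_global_updates(portdb.porttree_root)
#       perform_global_updates(cpv, fake_vardb, upd_commands)
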
1380 def visible(pkgsettings, pkg):
1381         """
1382         Check if a package is visible. An InvalidDependString exception
1383         raised for an invalid LICENSE is caught and treated as not visible.
1384         TODO: optionally generate a list of masking reasons
1385         @rtype: Boolean
1386         @returns: True if the package is visible, False otherwise.
1387         """
1388         if not pkg.metadata["SLOT"]:
1389                 return False
1390         if not pkg.installed:
1391                 if not pkgsettings._accept_chost(pkg.cpv, pkg.metadata):
1392                         return False
1393         eapi = pkg.metadata["EAPI"]
1394         if not portage.eapi_is_supported(eapi):
1395                 return False
1396         if not pkg.installed:
1397                 if portage._eapi_is_deprecated(eapi):
1398                         return False
1399                 if pkgsettings._getMissingKeywords(pkg.cpv, pkg.metadata):
1400                         return False
1401         if pkgsettings._getMaskAtom(pkg.cpv, pkg.metadata):
1402                 return False
1403         if pkgsettings._getProfileMaskAtom(pkg.cpv, pkg.metadata):
1404                 return False
1405         try:
1406                 if pkgsettings._getMissingLicenses(pkg.cpv, pkg.metadata):
1407                         return False
1408         except portage.exception.InvalidDependString:
1409                 return False
1410         return True
1411
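# Illustrative sketch (hypothetical caller; variable names assumed): filter
# candidate packages by visibility before considering them for selection.
#
#       pkgsettings = root_config.settings
#       candidates = [pkg for pkg in matches if visible(pkgsettings, pkg)]
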
1412 def get_masking_status(pkg, pkgsettings, root_config):
1413
1414         mreasons = portage.getmaskingstatus(
1415                 pkg, settings=pkgsettings,
1416                 portdb=root_config.trees["porttree"].dbapi)
1417
1418         if not pkg.installed:
1419                 if not pkgsettings._accept_chost(pkg.cpv, pkg.metadata):
1420                         mreasons.append("CHOST: %s" % \
1421                                 pkg.metadata["CHOST"])
1422
1423         if not pkg.metadata["SLOT"]:
1424                 mreasons.append("invalid: SLOT is undefined")
1425
1426         return mreasons
1427
1428 def get_mask_info(root_config, cpv, pkgsettings,
1429         db, pkg_type, built, installed, db_keys):
1430         eapi_masked = False
1431         try:
1432                 metadata = dict(izip(db_keys,
1433                         db.aux_get(cpv, db_keys)))
1434         except KeyError:
1435                 metadata = None
1436         if metadata and not built:
1437                 pkgsettings.setcpv(cpv, mydb=metadata)
1438                 metadata["USE"] = pkgsettings["PORTAGE_USE"]
1439                 metadata['CHOST'] = pkgsettings.get('CHOST', '')
1440         if metadata is None:
1441                 mreasons = ["corruption"]
1442         else:
1443                 pkg = Package(type_name=pkg_type, root_config=root_config,
1444                         cpv=cpv, built=built, installed=installed, metadata=metadata)
1445                 mreasons = get_masking_status(pkg, pkgsettings, root_config)
1446         return metadata, mreasons
1447
1448 def show_masked_packages(masked_packages):
1449         shown_licenses = set()
1450         shown_comments = set()
1451         # Maybe there is both an ebuild and a binary. Only
1452         # show one of them to avoid redundant output.
1453         shown_cpvs = set()
1454         have_eapi_mask = False
1455         for (root_config, pkgsettings, cpv,
1456                 metadata, mreasons) in masked_packages:
1457                 if cpv in shown_cpvs:
1458                         continue
1459                 shown_cpvs.add(cpv)
1460                 comment, filename = None, None
1461                 if "package.mask" in mreasons:
1462                         comment, filename = \
1463                                 portage.getmaskingreason(
1464                                 cpv, metadata=metadata,
1465                                 settings=pkgsettings,
1466                                 portdb=root_config.trees["porttree"].dbapi,
1467                                 return_location=True)
1468                 missing_licenses = []
1469                 if metadata:
1470                         if not portage.eapi_is_supported(metadata["EAPI"]):
1471                                 have_eapi_mask = True
1472                         try:
1473                                 missing_licenses = \
1474                                         pkgsettings._getMissingLicenses(
1475                                                 cpv, metadata)
1476                         except portage.exception.InvalidDependString:
1477                                 # This will have already been reported
1478                                 # above via mreasons.
1479                                 pass
1480
1481                 print "- "+cpv+" (masked by: "+", ".join(mreasons)+")"
1482                 if comment and comment not in shown_comments:
1483                         print filename+":"
1484                         print comment
1485                         shown_comments.add(comment)
1486                 portdb = root_config.trees["porttree"].dbapi
1487                 for l in missing_licenses:
1488                         l_path = portdb.findLicensePath(l)
1489                         if l in shown_licenses:
1490                                 continue
1491                         msg = ("A copy of the '%s' license" + \
1492                         " is located at '%s'.") % (l, l_path)
1493                         print msg
1494                         print
1495                         shown_licenses.add(l)
1496         return have_eapi_mask
1497
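# Illustrative sketch (hypothetical caller): each masked_packages entry is a
# (root_config, pkgsettings, cpv, metadata, mreasons) tuple, typically built
# from get_mask_info() above.
#
#       metadata, mreasons = get_mask_info(root_config, cpv, pkgsettings,
#               portdb, "ebuild", built=False, installed=False, db_keys=db_keys)
#       masked_packages.append(
#               (root_config, pkgsettings, cpv, metadata, mreasons))
#       have_eapi_mask = show_masked_packages(masked_packages)
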
1498 class Task(SlotObject):
1499         __slots__ = ("_hash_key", "_hash_value")
1500
1501         def _get_hash_key(self):
1502                 hash_key = getattr(self, "_hash_key", None)
1503                 if hash_key is None:
1504                         raise NotImplementedError(self)
1505                 return hash_key
1506
1507         def __eq__(self, other):
1508                 return self._get_hash_key() == other
1509
1510         def __ne__(self, other):
1511                 return self._get_hash_key() != other
1512
1513         def __hash__(self):
1514                 hash_value = getattr(self, "_hash_value", None)
1515                 if hash_value is None:
1516                         self._hash_value = hash(self._get_hash_key())
1517                 return self._hash_value
1518
1519         def __len__(self):
1520                 return len(self._get_hash_key())
1521
1522         def __getitem__(self, key):
1523                 return self._get_hash_key()[key]
1524
1525         def __iter__(self):
1526                 return iter(self._get_hash_key())
1527
1528         def __contains__(self, key):
1529                 return key in self._get_hash_key()
1530
1531         def __str__(self):
1532                 return str(self._get_hash_key())
1533
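# Because Task.__eq__ and Task.__hash__ both operate on the hash key tuple,
# a Task instance also compares equal to that tuple itself.  For example
# (hypothetical values), a Blocker behaves like the tuple
# ("blocks", "/", "<sys-apps/foo-1.0", "0") and a Package like
# ("ebuild", "/", "sys-apps/foo-1.0", "merge") when used as dict keys or
# set members.
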
1534 class Blocker(Task):
1535
1536         __hash__ = Task.__hash__
1537         __slots__ = ("root", "atom", "cp", "eapi", "satisfied")
1538
1539         def __init__(self, **kwargs):
1540                 Task.__init__(self, **kwargs)
1541                 self.cp = portage.dep_getkey(self.atom)
1542
1543         def _get_hash_key(self):
1544                 hash_key = getattr(self, "_hash_key", None)
1545                 if hash_key is None:
1546                         self._hash_key = \
1547                                 ("blocks", self.root, self.atom, self.eapi)
1548                 return self._hash_key
1549
1550 class Package(Task):
1551
1552         __hash__ = Task.__hash__
1553         __slots__ = ("built", "cpv", "depth",
1554                 "installed", "metadata", "onlydeps", "operation",
1555                 "root_config", "type_name",
1556                 "category", "counter", "cp", "cpv_split",
1557                 "inherited", "iuse", "mtime",
1558                 "pf", "pv_split", "root", "slot", "slot_atom", "use")
1559
1560         metadata_keys = [
1561                 "CHOST", "COUNTER", "DEPEND", "EAPI",
1562                 "INHERITED", "IUSE", "KEYWORDS",
1563                 "LICENSE", "PDEPEND", "PROVIDE", "RDEPEND",
1564                 "repository", "PROPERTIES", "RESTRICT", "SLOT", "USE", "_mtime_"]
1565
1566         def __init__(self, **kwargs):
1567                 Task.__init__(self, **kwargs)
1568                 self.root = self.root_config.root
1569                 self.metadata = _PackageMetadataWrapper(self, self.metadata)
1570                 self.cp = portage.cpv_getkey(self.cpv)
1571                 slot = self.slot
1572                 if not slot:
1573                         # Avoid an InvalidAtom exception when creating slot_atom.
1574                         # This package instance will be masked due to empty SLOT.
1575                         slot = '0'
1576                 self.slot_atom = portage.dep.Atom("%s:%s" % (self.cp, slot))
1577                 self.category, self.pf = portage.catsplit(self.cpv)
1578                 self.cpv_split = portage.catpkgsplit(self.cpv)
1579                 self.pv_split = self.cpv_split[1:]
1580
1581         class _use(object):
1582
1583                 __slots__ = ("__weakref__", "enabled")
1584
1585                 def __init__(self, use):
1586                         self.enabled = frozenset(use)
1587
1588         class _iuse(object):
1589
1590                 __slots__ = ("__weakref__", "all", "enabled", "disabled", "iuse_implicit", "regex", "tokens")
1591
1592                 def __init__(self, tokens, iuse_implicit):
1593                         self.tokens = tuple(tokens)
1594                         self.iuse_implicit = iuse_implicit
1595                         enabled = []
1596                         disabled = []
1597                         other = []
1598                         for x in tokens:
1599                                 prefix = x[:1]
1600                                 if prefix == "+":
1601                                         enabled.append(x[1:])
1602                                 elif prefix == "-":
1603                                         disabled.append(x[1:])
1604                                 else:
1605                                         other.append(x)
1606                         self.enabled = frozenset(enabled)
1607                         self.disabled = frozenset(disabled)
1608                         self.all = frozenset(chain(enabled, disabled, other))
1609
1610                 def __getattribute__(self, name):
1611                         if name == "regex":
1612                                 try:
1613                                         return object.__getattribute__(self, "regex")
1614                                 except AttributeError:
1615                                         all = object.__getattribute__(self, "all")
1616                                         iuse_implicit = object.__getattribute__(self, "iuse_implicit")
1617                                         # Escape anything except ".*" which is supposed
1618                                         # to pass through from _get_implicit_iuse()
1619                                         regex = (re.escape(x) for x in chain(all, iuse_implicit))
1620                                         regex = "^(%s)$" % "|".join(regex)
1621                                         regex = regex.replace("\\.\\*", ".*")
1622                                         self.regex = re.compile(regex)
1623                         return object.__getattribute__(self, name)
1624
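        # Example of the lazily compiled regex above (hypothetical values):
        # for tokens ["+foo", "-bar"] and iuse_implicit ["x86", "userland_.*"],
        # self.regex matches exactly "foo", "bar", "x86" and any flag of the
        # form "userland_*", since the ".*" is allowed to pass through
        # unescaped.
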
1625         def _get_hash_key(self):
1626                 hash_key = getattr(self, "_hash_key", None)
1627                 if hash_key is None:
1628                         if self.operation is None:
1629                                 self.operation = "merge"
1630                                 if self.onlydeps or self.installed:
1631                                         self.operation = "nomerge"
1632                         self._hash_key = \
1633                                 (self.type_name, self.root, self.cpv, self.operation)
1634                 return self._hash_key
1635
1636         def __lt__(self, other):
1637                 if other.cp != self.cp:
1638                         return False
1639                 if portage.pkgcmp(self.pv_split, other.pv_split) < 0:
1640                         return True
1641                 return False
1642
1643         def __le__(self, other):
1644                 if other.cp != self.cp:
1645                         return False
1646                 if portage.pkgcmp(self.pv_split, other.pv_split) <= 0:
1647                         return True
1648                 return False
1649
1650         def __gt__(self, other):
1651                 if other.cp != self.cp:
1652                         return False
1653                 if portage.pkgcmp(self.pv_split, other.pv_split) > 0:
1654                         return True
1655                 return False
1656
1657         def __ge__(self, other):
1658                 if other.cp != self.cp:
1659                         return False
1660                 if portage.pkgcmp(self.pv_split, other.pv_split) >= 0:
1661                         return True
1662                 return False
1663
1664 _all_metadata_keys = set(x for x in portage.auxdbkeys \
1665         if not x.startswith("UNUSED_"))
1666 _all_metadata_keys.discard("CDEPEND")
1667 _all_metadata_keys.update(Package.metadata_keys)
1668
1669 from portage.cache.mappings import slot_dict_class
1670 _PackageMetadataWrapperBase = slot_dict_class(_all_metadata_keys)
1671
1672 class _PackageMetadataWrapper(_PackageMetadataWrapperBase):
1673         """
1674         Detect metadata updates and synchronize Package attributes.
1675         """
1676
1677         __slots__ = ("_pkg",)
1678         _wrapped_keys = frozenset(
1679                 ["COUNTER", "INHERITED", "IUSE", "SLOT", "USE", "_mtime_"])
1680
1681         def __init__(self, pkg, metadata):
1682                 _PackageMetadataWrapperBase.__init__(self)
1683                 self._pkg = pkg
1684                 self.update(metadata)
1685
1686         def __setitem__(self, k, v):
1687                 _PackageMetadataWrapperBase.__setitem__(self, k, v)
1688                 if k in self._wrapped_keys:
1689                         getattr(self, "_set_" + k.lower())(k, v)
1690
1691         def _set_inherited(self, k, v):
1692                 if isinstance(v, basestring):
1693                         v = frozenset(v.split())
1694                 self._pkg.inherited = v
1695
1696         def _set_iuse(self, k, v):
1697                 self._pkg.iuse = self._pkg._iuse(
1698                         v.split(), self._pkg.root_config.iuse_implicit)
1699
1700         def _set_slot(self, k, v):
1701                 self._pkg.slot = v
1702
1703         def _set_use(self, k, v):
1704                 self._pkg.use = self._pkg._use(v.split())
1705
1706         def _set_counter(self, k, v):
1707                 if isinstance(v, basestring):
1708                         try:
1709                                 v = long(v.strip())
1710                         except ValueError:
1711                                 v = 0
1712                 self._pkg.counter = v
1713
1714         def _set__mtime_(self, k, v):
1715                 if isinstance(v, basestring):
1716                         try:
1717                                 v = long(v.strip())
1718                         except ValueError:
1719                                 v = 0
1720                 self._pkg.mtime = v
1721
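# Illustrative example (hypothetical values): assignments into the wrapper
# are mirrored onto the owning Package instance, e.g. after
#
#       pkg.metadata["COUNTER"] = "42"
#       pkg.metadata["SLOT"] = "2"
#
# pkg.counter is 42 and pkg.slot is "2", because __setitem__ dispatches to
# the _set_counter() and _set_slot() handlers above.
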
1722 class EbuildFetchonly(SlotObject):
1723
1724         __slots__ = ("fetch_all", "pkg", "pretend", "settings")
1725
1726         def execute(self):
1727                 settings = self.settings
1728                 pkg = self.pkg
1729                 portdb = pkg.root_config.trees["porttree"].dbapi
1730                 ebuild_path = portdb.findname(pkg.cpv)
1731                 settings.setcpv(pkg)
1732                 debug = settings.get("PORTAGE_DEBUG") == "1"
1733                 use_cache = 1 # always true
1734                 portage.doebuild_environment(ebuild_path, "fetch",
1735                         settings["ROOT"], settings, debug, use_cache, portdb)
1736                 restrict_fetch = 'fetch' in settings['PORTAGE_RESTRICT'].split()
1737
1738                 if restrict_fetch:
1739                         rval = self._execute_with_builddir()
1740                 else:
1741                         rval = portage.doebuild(ebuild_path, "fetch",
1742                                 settings["ROOT"], settings, debug=debug,
1743                                 listonly=self.pretend, fetchonly=1, fetchall=self.fetch_all,
1744                                 mydbapi=portdb, tree="porttree")
1745
1746                         if rval != os.EX_OK:
1747                                 msg = "Fetch failed for '%s'" % (pkg.cpv,)
1748                                 eerror(msg, phase="unpack", key=pkg.cpv)
1749
1750                 return rval
1751
1752         def _execute_with_builddir(self):
1753                 # To spawn pkg_nofetch requires PORTAGE_BUILDDIR for
1754                 # ensuring sane $PWD (bug #239560) and storing elog
1755                 # messages. Use a private temp directory, in order
1756                 # to avoid locking the main one.
1757                 settings = self.settings
1758                 global_tmpdir = settings["PORTAGE_TMPDIR"]
1759                 from tempfile import mkdtemp
1760                 try:
1761                         private_tmpdir = mkdtemp("", "._portage_fetch_.", global_tmpdir)
1762                 except OSError, e:
1763                         if e.errno != portage.exception.PermissionDenied.errno:
1764                                 raise
1765                         raise portage.exception.PermissionDenied(global_tmpdir)
1766                 settings["PORTAGE_TMPDIR"] = private_tmpdir
1767                 settings.backup_changes("PORTAGE_TMPDIR")
1768                 try:
1769                         retval = self._execute()
1770                 finally:
1771                         settings["PORTAGE_TMPDIR"] = global_tmpdir
1772                         settings.backup_changes("PORTAGE_TMPDIR")
1773                         shutil.rmtree(private_tmpdir)
1774                 return retval
1775
1776         def _execute(self):
1777                 settings = self.settings
1778                 pkg = self.pkg
1779                 root_config = pkg.root_config
1780                 portdb = root_config.trees["porttree"].dbapi
1781                 ebuild_path = portdb.findname(pkg.cpv)
1782                 debug = settings.get("PORTAGE_DEBUG") == "1"
1783                 portage.prepare_build_dirs(self.pkg.root, self.settings, 0)
1784
1785                 retval = portage.doebuild(ebuild_path, "fetch",
1786                         self.settings["ROOT"], self.settings, debug=debug,
1787                         listonly=self.pretend, fetchonly=1, fetchall=self.fetch_all,
1788                         mydbapi=portdb, tree="porttree")
1789
1790                 if retval != os.EX_OK:
1791                         msg = "Fetch failed for '%s'" % (pkg.cpv,)
1792                         eerror(msg, phase="unpack", key=pkg.cpv)
1793
1794                 portage.elog.elog_process(self.pkg.cpv, self.settings)
1795                 return retval
1796
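# Illustrative usage sketch (hypothetical caller; argument values assumed):
# an EbuildFetchonly instance would typically be created per package, and
# execute() returns a shell-style exit status.
#
#       fetch_task = EbuildFetchonly(fetch_all=fetch_all, pkg=pkg,
#               pretend=pretend, settings=settings)
#       rval = fetch_task.execute()
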
1797 class PollConstants(object):
1798
1799         """
1800         Provides POLL* constants that are equivalent to those from the
1801         select module, for use by PollSelectAdapter.
1802         """
1803
1804         names = ("POLLIN", "POLLPRI", "POLLOUT", "POLLERR", "POLLHUP", "POLLNVAL")
1805         v = 1
1806         for k in names:
1807                 locals()[k] = getattr(select, k, v)
1808                 v *= 2
1809         del k, v
1810
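# On platforms where the select module does not define some of the POLL*
# constants, the attributes above fall back to distinct power-of-two values,
# so bitwise tests like "event & PollConstants.POLLIN" keep working with
# PollSelectAdapter.
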
1811 class AsynchronousTask(SlotObject):
1812         """
1813         Subclasses override _wait() and _poll() so that calls
1814         to public methods can be wrapped for implementing
1815         hooks such as exit listener notification.
1816
1817         Subclasses should call self.wait() to notify exit listeners after
1818         the task is complete and self.returncode has been set.
1819         """
1820
1821         __slots__ = ("background", "cancelled", "returncode") + \
1822                 ("_exit_listeners", "_exit_listener_stack", "_start_listeners")
1823
1824         def start(self):
1825                 """
1826                 Start an asynchronous task and then return as soon as possible.
1827                 """
1828                 self._start_hook()
1829                 self._start()
1830
1831         def _start(self):
1832                 raise NotImplementedError(self)
1833
1834         def isAlive(self):
1835                 return self.returncode is None
1836
1837         def poll(self):
1838                 self._wait_hook()
1839                 return self._poll()
1840
1841         def _poll(self):
1842                 return self.returncode
1843
1844         def wait(self):
1845                 if self.returncode is None:
1846                         self._wait()
1847                 self._wait_hook()
1848                 return self.returncode
1849
1850         def _wait(self):
1851                 return self.returncode
1852
1853         def cancel(self):
1854                 self.cancelled = True
1855                 self.wait()
1856
1857         def addStartListener(self, f):
1858                 """
1859                 The function will be called with one argument, a reference to self.
1860                 """
1861                 if self._start_listeners is None:
1862                         self._start_listeners = []
1863                 self._start_listeners.append(f)
1864
1865         def removeStartListener(self, f):
1866                 if self._start_listeners is None:
1867                         return
1868                 self._start_listeners.remove(f)
1869
1870         def _start_hook(self):
1871                 if self._start_listeners is not None:
1872                         start_listeners = self._start_listeners
1873                         self._start_listeners = None
1874
1875                         for f in start_listeners:
1876                                 f(self)
1877
1878         def addExitListener(self, f):
1879                 """
1880                 The function will be called with one argument, a reference to self.
1881                 """
1882                 if self._exit_listeners is None:
1883                         self._exit_listeners = []
1884                 self._exit_listeners.append(f)
1885
1886         def removeExitListener(self, f):
1887                 if self._exit_listeners is None:
1888                         if self._exit_listener_stack is not None:
1889                                 self._exit_listener_stack.remove(f)
1890                         return
1891                 self._exit_listeners.remove(f)
1892
1893         def _wait_hook(self):
1894                 """
1895                 Call this method after the task completes, just before returning
1896                 the returncode from wait() or poll(). This hook is
1897                 used to trigger exit listeners when the returncode first
1898                 becomes available.
1899                 """
1900                 if self.returncode is not None and \
1901                         self._exit_listeners is not None:
1902
1903                         # This prevents recursion, in case one of the
1904                         # exit handlers triggers this method again by
1905                         # calling wait(). Use a stack that gives
1906                         # removeExitListener() an opportunity to consume
1907                         # listeners from the stack, before they can get
1908                         # called below. This is necessary because a call
1909                         # to one exit listener may result in a call to
1910                         # removeExitListener() for another listener on
1911                         # the stack. That listener needs to be removed
1912                         # from the stack since it would be inconsistent
1913                         # to call it after it has been passed into
1914                         # removeExitListener().
1915                         self._exit_listener_stack = self._exit_listeners
1916                         self._exit_listeners = None
1917
1918                         self._exit_listener_stack.reverse()
1919                         while self._exit_listener_stack:
1920                                 self._exit_listener_stack.pop()(self)
1921
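# Illustrative sketch (hypothetical listener): exit listeners receive the
# task itself once self.returncode has been set.
#
#       def _fetch_exit(fetch_task):
#               if fetch_task.returncode != os.EX_OK:
#                       pass  # handle the failure
#
#       task.addExitListener(_fetch_exit)
#       task.start()
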
1922 class AbstractPollTask(AsynchronousTask):
1923
1924         __slots__ = ("scheduler",) + \
1925                 ("_registered",)
1926
1927         _bufsize = 4096
1928         _exceptional_events = PollConstants.POLLERR | PollConstants.POLLNVAL
1929         _registered_events = PollConstants.POLLIN | PollConstants.POLLHUP | \
1930                 _exceptional_events
1931
1932         def _unregister(self):
1933                 raise NotImplementedError(self)
1934
1935         def _unregister_if_appropriate(self, event):
1936                 if self._registered:
1937                         if event & self._exceptional_events:
1938                                 self._unregister()
1939                                 self.cancel()
1940                         elif event & PollConstants.POLLHUP:
1941                                 self._unregister()
1942                                 self.wait()
1943
1944 class PipeReader(AbstractPollTask):
1945
1946         """
1947         Reads output from one or more files and saves it in memory,
1948         for retrieval via the getvalue() method. This is driven by
1949         the scheduler's poll() loop, so it runs entirely within the
1950         current process.
1951         """
1952
1953         __slots__ = ("input_files",) + \
1954                 ("_read_data", "_reg_ids")
1955
1956         def _start(self):
1957                 self._reg_ids = set()
1958                 self._read_data = []
1959                 for k, f in self.input_files.iteritems():
1960                         fcntl.fcntl(f.fileno(), fcntl.F_SETFL,
1961                                 fcntl.fcntl(f.fileno(), fcntl.F_GETFL) | os.O_NONBLOCK)
1962                         self._reg_ids.add(self.scheduler.register(f.fileno(),
1963                                 self._registered_events, self._output_handler))
1964                 self._registered = True
1965
1966         def isAlive(self):
1967                 return self._registered
1968
1969         def cancel(self):
1970                 if self.returncode is None:
1971                         self.returncode = 1
1972                         self.cancelled = True
1973                 self.wait()
1974
1975         def _wait(self):
1976                 if self.returncode is not None:
1977                         return self.returncode
1978
1979                 if self._registered:
1980                         self.scheduler.schedule(self._reg_ids)
1981                         self._unregister()
1982
1983                 self.returncode = os.EX_OK
1984                 return self.returncode
1985
1986         def getvalue(self):
1987                 """Retrieve the entire contents"""
1988                 if sys.hexversion >= 0x3000000:
1989                         return bytes().join(self._read_data)
1990                 return "".join(self._read_data)
1991
1992         def close(self):
1993                 """Free the memory buffer."""
1994                 self._read_data = None
1995
1996         def _output_handler(self, fd, event):
1997
1998                 if event & PollConstants.POLLIN:
1999
2000                         for f in self.input_files.itervalues():
2001                                 if fd == f.fileno():
2002                                         break
2003
2004                         buf = array.array('B')
2005                         try:
2006                                 buf.fromfile(f, self._bufsize)
2007                         except EOFError:
2008                                 pass
2009
2010                         if buf:
2011                                 self._read_data.append(buf.tostring())
2012                         else:
2013                                 self._unregister()
2014                                 self.wait()
2015
2016                 self._unregister_if_appropriate(event)
2017                 return self._registered
2018
2019         def _unregister(self):
2020                 """
2021                 Unregister from the scheduler and close open files.
2022                 """
2023
2024                 self._registered = False
2025
2026                 if self._reg_ids is not None:
2027                         for reg_id in self._reg_ids:
2028                                 self.scheduler.unregister(reg_id)
2029                         self._reg_ids = None
2030
2031                 if self.input_files is not None:
2032                         for f in self.input_files.itervalues():
2033                                 f.close()
2034                         self.input_files = None
2035
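# Illustrative usage sketch (hypothetical names): capture everything written
# to a pipe by some other poll-driven task sharing the same scheduler.
#
#       pr, pw = os.pipe()
#       reader = PipeReader(input_files={"pipe_read": os.fdopen(pr, 'rb')},
#               scheduler=scheduler)
#       reader.start()
#       # ... hand pw to a producer, which closes it when done ...
#       reader.wait()
#       output = reader.getvalue()
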
2036 class CompositeTask(AsynchronousTask):
2037
2038         __slots__ = ("scheduler",) + ("_current_task",)
2039
2040         def isAlive(self):
2041                 return self._current_task is not None
2042
2043         def cancel(self):
2044                 self.cancelled = True
2045                 if self._current_task is not None:
2046                         self._current_task.cancel()
2047
2048         def _poll(self):
2049                 """
2050                 This does a loop calling self._current_task.poll()
2051                 repeatedly as long as the value of self._current_task
2052                 keeps changing. It calls poll() a maximum of one time
2053                 for a given self._current_task instance. This is useful
2054                 since calling poll() on a task can trigger advance to
2055                 the next task, which can eventually lead to the returncode
2056                 being set in cases where polling only a single task would
2057                 not have the same effect.
2058                 """
2059
2060                 prev = None
2061                 while True:
2062                         task = self._current_task
2063                         if task is None or task is prev:
2064                                 # don't poll the same task more than once
2065                                 break
2066                         task.poll()
2067                         prev = task
2068
2069                 return self.returncode
2070
2071         def _wait(self):
2072
2073                 prev = None
2074                 while True:
2075                         task = self._current_task
2076                         if task is None:
2077                                 # no task is left to wait for
2078                                 break
2079                         if task is prev:
2080                                 # Before the task.wait() method returned, an exit
2081                                 # listener should have set self._current_task to either
2082                                 # a different task or None. Something is wrong.
2083                                 raise AssertionError("self._current_task has not " + \
2084                                         "changed since calling wait", self, task)
2085                         task.wait()
2086                         prev = task
2087
2088                 return self.returncode
2089
2090         def _assert_current(self, task):
2091                 """
2092                 Raises an AssertionError if the given task is not the
2093                 same one as self._current_task. This can be useful
2094                 for detecting bugs.
2095                 """
2096                 if task is not self._current_task:
2097                         raise AssertionError("Unrecognized task: %s" % (task,))
2098
2099         def _default_exit(self, task):
2100                 """
2101                 Calls _assert_current() on the given task and then sets the
2102                 composite returncode attribute if task.returncode != os.EX_OK.
2103                 If the task failed then self._current_task will be set to None.
2104                 Subclasses can use this as a generic task exit callback.
2105
2106                 @rtype: int
2107                 @returns: The task.returncode attribute.
2108                 """
2109                 self._assert_current(task)
2110                 if task.returncode != os.EX_OK:
2111                         self.returncode = task.returncode
2112                         self._current_task = None
2113                 return task.returncode
2114
2115         def _final_exit(self, task):
2116                 """
2117                 Assumes that task is the final task of this composite task.
2118                 Calls _default_exit() and sets self.returncode to the task's
2119                 returncode and sets self._current_task to None.
2120                 """
2121                 self._default_exit(task)
2122                 self._current_task = None
2123                 self.returncode = task.returncode
2124                 return self.returncode
2125
2126         def _default_final_exit(self, task):
2127                 """
2128                 This calls _final_exit() and then wait().
2129
2130                 Subclasses can use this as a generic final task exit callback.
2131
2132                 """
2133                 self._final_exit(task)
2134                 return self.wait()
2135
2136         def _start_task(self, task, exit_handler):
2137                 """
2138                 Register exit handler for the given task, set it
2139                 as self._current_task, and call task.start().
2140
2141                 Subclasses can use this as a generic way to start
2142                 a task.
2143
2144                 """
2145                 task.addExitListener(exit_handler)
2146                 self._current_task = task
2147                 task.start()
2148
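# Illustrative subclass sketch (hypothetical names): a typical CompositeTask
# starts its first subtask from _start() and chains exit handlers built on
# the helper methods above.
#
#       class ExampleCompositeTask(CompositeTask):
#               __slots__ = ("first_task", "second_task")
#
#               def _start(self):
#                       self._start_task(self.first_task, self._first_exit)
#
#               def _first_exit(self, first_task):
#                       if self._default_exit(first_task) != os.EX_OK:
#                               self.wait()
#                               return
#                       self._start_task(self.second_task,
#                               self._default_final_exit)
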
2149 class TaskSequence(CompositeTask):
2150         """
2151         A collection of tasks that executes sequentially. Each task
2152         must have an addExitListener() method that can be used as
2153         a means to trigger movement from one task to the next.
2154         """
2155
2156         __slots__ = ("_task_queue",)
2157
2158         def __init__(self, **kwargs):
2159                 AsynchronousTask.__init__(self, **kwargs)
2160                 self._task_queue = deque()
2161
2162         def add(self, task):
2163                 self._task_queue.append(task)
2164
2165         def _start(self):
2166                 self._start_next_task()
2167
2168         def cancel(self):
2169                 self._task_queue.clear()
2170                 CompositeTask.cancel(self)
2171
2172         def _start_next_task(self):
2173                 self._start_task(self._task_queue.popleft(),
2174                         self._task_exit_handler)
2175
2176         def _task_exit_handler(self, task):
2177                 if self._default_exit(task) != os.EX_OK:
2178                         self.wait()
2179                 elif self._task_queue:
2180                         self._start_next_task()
2181                 else:
2182                         self._final_exit(task)
2183                         self.wait()
2184
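# Illustrative usage sketch (hypothetical tasks): queued tasks run one after
# another, and the sequence stops early if any of them fails.
#
#       seq = TaskSequence(scheduler=scheduler)
#       seq.add(fetch_task)
#       seq.add(build_task)
#       seq.addExitListener(self._build_exit)
#       seq.start()
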
2185 class SubProcess(AbstractPollTask):
2186
2187         __slots__ = ("pid",) + \
2188                 ("_files", "_reg_id")
2189
2190         # A file descriptor is required for the scheduler to monitor changes from
2191         # inside a poll() loop. When logging is not enabled, create a pipe just to
2192         # serve this purpose alone.
2193         _dummy_pipe_fd = 9
2194
2195         def _poll(self):
2196                 if self.returncode is not None:
2197                         return self.returncode
2198                 if self.pid is None:
2199                         return self.returncode
2200                 if self._registered:
2201                         return self.returncode
2202
2203                 try:
2204                         retval = os.waitpid(self.pid, os.WNOHANG)
2205                 except OSError, e:
2206                         if e.errno != errno.ECHILD:
2207                                 raise
2208                         del e
2209                         retval = (self.pid, 1)
2210
2211                 if retval == (0, 0):
2212                         return None
2213                 self._set_returncode(retval)
2214                 return self.returncode
2215
2216         def cancel(self):
2217                 if self.isAlive():
2218                         try:
2219                                 os.kill(self.pid, signal.SIGTERM)
2220                         except OSError, e:
2221                                 if e.errno != errno.ESRCH:
2222                                         raise
2223                                 del e
2224
2225                 self.cancelled = True
2226                 if self.pid is not None:
2227                         self.wait()
2228                 return self.returncode
2229
2230         def isAlive(self):
2231                 return self.pid is not None and \
2232                         self.returncode is None
2233
2234         def _wait(self):
2235
2236                 if self.returncode is not None:
2237                         return self.returncode
2238
2239                 if self._registered:
2240                         self.scheduler.schedule(self._reg_id)
2241                         self._unregister()
2242                         if self.returncode is not None:
2243                                 return self.returncode
2244
2245                 try:
2246                         wait_retval = os.waitpid(self.pid, 0)
2247                 except OSError, e:
2248                         if e.errno != errno.ECHILD:
2249                                 raise
2250                         del e
2251                         self._set_returncode((self.pid, 1))
2252                 else:
2253                         self._set_returncode(wait_retval)
2254
2255                 return self.returncode
2256
2257         def _unregister(self):
2258                 """
2259                 Unregister from the scheduler and close open files.
2260                 """
2261
2262                 self._registered = False
2263
2264                 if self._reg_id is not None:
2265                         self.scheduler.unregister(self._reg_id)
2266                         self._reg_id = None
2267
2268                 if self._files is not None:
2269                         for f in self._files.itervalues():
2270                                 f.close()
2271                         self._files = None
2272
2273         def _set_returncode(self, wait_retval):
2274
2275                 retval = wait_retval[1]
2276
2277                 if retval != os.EX_OK:
2278                         if retval & 0xff:
2279                                 retval = (retval & 0xff) << 8
2280                         else:
2281                                 retval = retval >> 8
2282
2283                 self.returncode = retval
2284
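# The conversion in SubProcess._set_returncode() above folds the raw
# os.waitpid() status into a single integer: a normal exit keeps its exit
# code (e.g. status 0x0100 becomes 1), while death by a signal is encoded
# as signal << 8 (e.g. SIGTERM, status 15, becomes 3840).
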
2285 class SpawnProcess(SubProcess):
2286
2287         """
2288         Constructor keyword args are passed into portage.process.spawn().
2289         The required "args" keyword argument will be passed as the first
2290         spawn() argument.
2291         """
2292
2293         _spawn_kwarg_names = ("env", "opt_name", "fd_pipes",
2294                 "uid", "gid", "groups", "umask", "logfile",
2295                 "path_lookup", "pre_exec")
2296
2297         __slots__ = ("args",) + \
2298                 _spawn_kwarg_names
2299
2300         _file_names = ("log", "process", "stdout")
2301         _files_dict = slot_dict_class(_file_names, prefix="")
2302
2303         def _start(self):
2304
2305                 if self.cancelled:
2306                         return
2307
2308                 if self.fd_pipes is None:
2309                         self.fd_pipes = {}
2310                 fd_pipes = self.fd_pipes
2311                 fd_pipes.setdefault(0, sys.stdin.fileno())
2312                 fd_pipes.setdefault(1, sys.stdout.fileno())
2313                 fd_pipes.setdefault(2, sys.stderr.fileno())
2314
2315                 # flush any pending output
2316                 for fd in fd_pipes.itervalues():
2317                         if fd == sys.stdout.fileno():
2318                                 sys.stdout.flush()
2319                         if fd == sys.stderr.fileno():
2320                                 sys.stderr.flush()
2321
2322                 logfile = self.logfile
2323                 self._files = self._files_dict()
2324                 files = self._files
2325
2326                 master_fd, slave_fd = self._pipe(fd_pipes)
2327                 fcntl.fcntl(master_fd, fcntl.F_SETFL,
2328                         fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
2329
2330                 null_input = None
2331                 fd_pipes_orig = fd_pipes.copy()
2332                 if self.background:
2333                         # TODO: Use job control functions like tcsetpgrp() to control
2334                         # access to stdin. Until then, use /dev/null so that any
2335                         # attempts to read from stdin will immediately return EOF
2336                         # instead of blocking indefinitely.
2337                         null_input = open('/dev/null', 'rb')
2338                         fd_pipes[0] = null_input.fileno()
2339                 else:
2340                         fd_pipes[0] = fd_pipes_orig[0]
2341
2342                 files.process = os.fdopen(master_fd, 'rb')
2343                 if logfile is not None:
2344
2345                         fd_pipes[1] = slave_fd
2346                         fd_pipes[2] = slave_fd
2347
2348                         files.log = open(logfile, mode='ab')
2349                         portage.util.apply_secpass_permissions(logfile,
2350                                 uid=portage.portage_uid, gid=portage.portage_gid,
2351                                 mode=0660)
2352
2353                         if not self.background:
2354                                 files.stdout = os.fdopen(os.dup(fd_pipes_orig[1]), 'wb')
2355
2356                         output_handler = self._output_handler
2357
2358                 else:
2359
2360                         # Create a dummy pipe so the scheduler can monitor
2361                         # the process from inside a poll() loop.
2362                         fd_pipes[self._dummy_pipe_fd] = slave_fd
2363                         if self.background:
2364                                 fd_pipes[1] = slave_fd
2365                                 fd_pipes[2] = slave_fd
2366                         output_handler = self._dummy_handler
2367
2368                 kwargs = {}
2369                 for k in self._spawn_kwarg_names:
2370                         v = getattr(self, k)
2371                         if v is not None:
2372                                 kwargs[k] = v
2373
2374                 kwargs["fd_pipes"] = fd_pipes
2375                 kwargs["returnpid"] = True
2376                 kwargs.pop("logfile", None)
2377
2378                 self._reg_id = self.scheduler.register(files.process.fileno(),
2379                         self._registered_events, output_handler)
2380                 self._registered = True
2381
2382                 retval = self._spawn(self.args, **kwargs)
2383
2384                 os.close(slave_fd)
2385                 if null_input is not None:
2386                         null_input.close()
2387
2388                 if isinstance(retval, int):
2389                         # spawn failed
2390                         self._unregister()
2391                         self.returncode = retval
2392                         self.wait()
2393                         return
2394
2395                 self.pid = retval[0]
2396                 portage.process.spawned_pids.remove(self.pid)
2397
2398         def _pipe(self, fd_pipes):
2399                 """
2400                 @type fd_pipes: dict
2401                 @param fd_pipes: pipes from which to copy terminal size if desired.
2402                 """
2403                 return os.pipe()
2404
2405         def _spawn(self, args, **kwargs):
2406                 return portage.process.spawn(args, **kwargs)
2407
2408         def _output_handler(self, fd, event):
2409
2410                 if event & PollConstants.POLLIN:
2411
2412                         files = self._files
2413                         buf = array.array('B')
2414                         try:
2415                                 buf.fromfile(files.process, self._bufsize)
2416                         except EOFError:
2417                                 pass
2418
2419                         if buf:
2420                                 if not self.background:
2421                                         buf.tofile(files.stdout)
2422                                         files.stdout.flush()
2423                                 buf.tofile(files.log)
2424                                 files.log.flush()
2425                         else:
2426                                 self._unregister()
2427                                 self.wait()
2428
2429                 self._unregister_if_appropriate(event)
2430                 return self._registered
2431
2432         def _dummy_handler(self, fd, event):
2433                 """
2434                 This method is mainly interested in detecting EOF, since
2435                 the only purpose of the pipe is to allow the scheduler to
2436                 monitor the process from inside a poll() loop.
2437                 """
2438
2439                 if event & PollConstants.POLLIN:
2440
2441                         buf = array.array('B')
2442                         try:
2443                                 buf.fromfile(self._files.process, self._bufsize)
2444                         except EOFError:
2445                                 pass
2446
2447                         if buf:
2448                                 pass
2449                         else:
2450                                 self._unregister()
2451                                 self.wait()
2452
2453                 self._unregister_if_appropriate(event)
2454                 return self._registered
2455
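# Illustrative usage sketch (hypothetical argument values): SpawnProcess runs
# an external command under the scheduler's poll() loop, optionally teeing
# its output into a log file.
#
#       proc = SpawnProcess(args=["/bin/true"], env=os.environ.copy(),
#               scheduler=scheduler, logfile=logfile, background=False)
#       proc.start()
#       retval = proc.wait()
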
2456 class MiscFunctionsProcess(SpawnProcess):
2457         """
2458         Spawns misc-functions.sh with an existing ebuild environment.
2459         """
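             # Constructed by EbuildPhase._ebuild_exit (below) with a list of
             # misc-functions.sh function names taken from portage._post_phase_cmds,
             # roughly:
             #   MiscFunctionsProcess(background=background, commands=cmds,
             #       phase=phase, pkg=pkg, scheduler=scheduler, settings=settings)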
2460
2461         __slots__ = ("commands", "phase", "pkg", "settings")
2462
2463         def _start(self):
2464                 settings = self.settings
2465                 settings.pop("EBUILD_PHASE", None)
2466                 portage_bin_path = settings["PORTAGE_BIN_PATH"]
2467                 misc_sh_binary = os.path.join(portage_bin_path,
2468                         os.path.basename(portage.const.MISC_SH_BINARY))
2469
2470                 self.args = [portage._shell_quote(misc_sh_binary)] + self.commands
2471                 self.logfile = settings.get("PORTAGE_LOG_FILE")
2472
2473                 portage._doebuild_exit_status_unlink(
2474                         settings.get("EBUILD_EXIT_STATUS_FILE"))
2475
2476                 SpawnProcess._start(self)
2477
2478         def _spawn(self, args, **kwargs):
2479                 settings = self.settings
2480                 debug = settings.get("PORTAGE_DEBUG") == "1"
2481                 return portage.spawn(" ".join(args), settings,
2482                         debug=debug, **kwargs)
2483
2484         def _set_returncode(self, wait_retval):
2485                 SpawnProcess._set_returncode(self, wait_retval)
2486                 self.returncode = portage._doebuild_exit_status_check_and_log(
2487                         self.settings, self.phase, self.returncode)
2488
2489 class EbuildFetcher(SpawnProcess):
2490
2491         __slots__ = ("config_pool", "fetchonly", "fetchall", "pkg", "prefetch") + \
2492                 ("_build_dir",)
2493
2494         def _start(self):
2495
2496                 root_config = self.pkg.root_config
2497                 portdb = root_config.trees["porttree"].dbapi
2498                 ebuild_path = portdb.findname(self.pkg.cpv)
2499                 settings = self.config_pool.allocate()
2500                 settings.setcpv(self.pkg)
2501
2502                 # In prefetch mode, logging goes to emerge-fetch.log and the builddir
2503                 # should not be touched since otherwise it could interfere with
2504                 # another instance of the same cpv concurrently being built for a
2505                 # different $ROOT (currently, builds only cooperate with prefetchers
2506                 # that are spawned for the same $ROOT).
2507                 if not self.prefetch:
2508                         self._build_dir = EbuildBuildDir(pkg=self.pkg, settings=settings)
2509                         self._build_dir.lock()
2510                         self._build_dir.clean()
2511                         portage.prepare_build_dirs(self.pkg.root, self._build_dir.settings, 0)
2512                         if self.logfile is None:
2513                                 self.logfile = settings.get("PORTAGE_LOG_FILE")
2514
2515                 phase = "fetch"
2516                 if self.fetchall:
2517                         phase = "fetchall"
2518
2519                 # If any incremental variables have been overridden
2520                 # via the environment, those values need to be passed
2521                 # along here so that they are correctly considered by
2522                 # the config instance in the subprocess.
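                     # For example, a USE or FEATURES value exported in the calling
                     # environment is forwarded via fetch_env so that the child's
                     # config applies the same incremental override.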
2523                 fetch_env = os.environ.copy()
2524
2525                 nocolor = settings.get("NOCOLOR")
2526                 if nocolor is not None:
2527                         fetch_env["NOCOLOR"] = nocolor
2528
2529                 fetch_env["PORTAGE_NICENESS"] = "0"
2530                 if self.prefetch:
2531                         fetch_env["PORTAGE_PARALLEL_FETCHONLY"] = "1"
2532
2533                 ebuild_binary = os.path.join(
2534                         settings["PORTAGE_BIN_PATH"], "ebuild")
2535
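                     # The fetch is performed by spawning the ebuild(1) helper on
                     # the ebuild file with the "fetch" (or "fetchall") phase,
                     # roughly: ebuild <ebuild file> fetch [--debug]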
2536                 fetch_args = [ebuild_binary, ebuild_path, phase]
2537                 debug = settings.get("PORTAGE_DEBUG") == "1"
2538                 if debug:
2539                         fetch_args.append("--debug")
2540
2541                 self.args = fetch_args
2542                 self.env = fetch_env
2543                 SpawnProcess._start(self)
2544
2545         def _pipe(self, fd_pipes):
2546                 """When appropriate, use a pty so that fetcher progress bars,
2547                 such as the one wget displays, work properly."""
2548                 if self.background or not sys.stdout.isatty():
2549                         # When the output only goes to a log file,
2550                         # there's no point in creating a pty.
2551                         return os.pipe()
2552                 stdout_pipe = fd_pipes.get(1)
2553                 got_pty, master_fd, slave_fd = \
2554                         portage._create_pty_or_pipe(copy_term_size=stdout_pipe)
2555                 return (master_fd, slave_fd)
2556
2557         def _set_returncode(self, wait_retval):
2558                 SpawnProcess._set_returncode(self, wait_retval)
2559                 # Collect elog messages that might have been
2560                 # created by the pkg_nofetch phase.
2561                 if self._build_dir is not None:
2562                         # Skip elog messages for prefetch, in order to avoid duplicates.
2563                         if not self.prefetch and self.returncode != os.EX_OK:
2564                                 elog_out = None
2565                                 if self.logfile is not None:
2566                                         if self.background:
2567                                                 elog_out = open(self.logfile, 'a')
2568                                 msg = "Fetch failed for '%s'" % (self.pkg.cpv,)
2569                                 if self.logfile is not None:
2570                                         msg += ", Log file:"
2571                                 eerror(msg, phase="unpack", key=self.pkg.cpv, out=elog_out)
2572                                 if self.logfile is not None:
2573                                         eerror(" '%s'" % (self.logfile,),
2574                                                 phase="unpack", key=self.pkg.cpv, out=elog_out)
2575                                 if elog_out is not None:
2576                                         elog_out.close()
2577                         if not self.prefetch:
2578                                 portage.elog.elog_process(self.pkg.cpv, self._build_dir.settings)
2580                         if self.returncode == os.EX_OK:
2581                                 self._build_dir.clean()
2582                         self._build_dir.unlock()
2583                         self.config_pool.deallocate(self._build_dir.settings)
2584                         self._build_dir = None
2585
2586 class EbuildBuildDir(SlotObject):
2587
2588         __slots__ = ("dir_path", "pkg", "settings",
2589                 "locked", "_catdir", "_lock_obj")
2590
2591         def __init__(self, **kwargs):
2592                 SlotObject.__init__(self, **kwargs)
2593                 self.locked = False
2594
2595         def lock(self):
2596                 """
2597                 This raises an AlreadyLocked exception if lock() is called
2598                 while a lock is already held. In order to avoid this, call
2599                 unlock() or check whether the "locked" attribute is True
2600                 or False before calling lock().
2601                 """
2602                 if self._lock_obj is not None:
2603                         raise self.AlreadyLocked((self._lock_obj,))
2604
2605                 dir_path = self.dir_path
2606                 if dir_path is None:
2607                         root_config = self.pkg.root_config
2608                         portdb = root_config.trees["porttree"].dbapi
2609                         ebuild_path = portdb.findname(self.pkg.cpv)
2610                         settings = self.settings
2611                         settings.setcpv(self.pkg)
2612                         debug = settings.get("PORTAGE_DEBUG") == "1"
2613                         use_cache = 1 # always true
2614                         portage.doebuild_environment(ebuild_path, "setup", root_config.root,
2615                                 self.settings, debug, use_cache, portdb)
2616                         dir_path = self.settings["PORTAGE_BUILDDIR"]
2617
2618                 catdir = os.path.dirname(dir_path)
2619                 self._catdir = catdir
2620
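                     # The category directory is locked while it is created and while
                     # the build dir lock is acquired, so that a concurrent unlock()
                     # in another process cannot rmdir() the category directory in
                     # between the ensure_dirs() and lockdir() calls below.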
2621                 portage.util.ensure_dirs(os.path.dirname(catdir),
2622                         gid=portage.portage_gid,
2623                         mode=070, mask=0)
2624                 catdir_lock = None
2625                 try:
2626                         catdir_lock = portage.locks.lockdir(catdir)
2627                         portage.util.ensure_dirs(catdir,
2628                                 gid=portage.portage_gid,
2629                                 mode=070, mask=0)
2630                         self._lock_obj = portage.locks.lockdir(dir_path)
2631                 finally:
2632                         self.locked = self._lock_obj is not None
2633                         if catdir_lock is not None:
2634                                 portage.locks.unlockdir(catdir_lock)
2635
2636         def clean(self):
2637                 """Uses shutil.rmtree() rather than spawning a 'clean' phase. Disabled
2638                 by keepwork or keeptemp in FEATURES."""
2639                 settings = self.settings
2640                 features = settings.features
2641                 if not ("keepwork" in features or "keeptemp" in features):
2642                         try:
2643                                 shutil.rmtree(settings["PORTAGE_BUILDDIR"])
2644                         except EnvironmentError, e:
2645                                 if e.errno != errno.ENOENT:
2646                                         raise
2647                                 del e
2648
2649         def unlock(self):
2650                 if self._lock_obj is None:
2651                         return
2652
2653                 portage.locks.unlockdir(self._lock_obj)
2654                 self._lock_obj = None
2655                 self.locked = False
2656
2657                 catdir = self._catdir
2658                 catdir_lock = None
2659                 try:
2660                         catdir_lock = portage.locks.lockdir(catdir)
2661                 finally:
2662                         if catdir_lock:
2663                                 try:
2664                                         os.rmdir(catdir)
2665                                 except OSError, e:
2666                                         if e.errno not in (errno.ENOENT,
2667                                                 errno.ENOTEMPTY, errno.EEXIST):
2668                                                 raise
2669                                         del e
2670                                 portage.locks.unlockdir(catdir_lock)
2671
2672         class AlreadyLocked(portage.exception.PortageException):
2673                 pass
2674
2675 class EbuildBuild(CompositeTask):
2676
2677         __slots__ = ("args_set", "config_pool", "find_blockers",
2678                 "ldpath_mtimes", "logger", "opts", "pkg", "pkg_count",
2679                 "prefetcher", "settings", "world_atom") + \
2680                 ("_build_dir", "_buildpkg", "_ebuild_path", "_issyspkg", "_tree")
2681
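             # Task flow (see the methods below): wait for any prefetcher, then
             # EbuildFetchonly or EbuildFetcher, then EbuildExecuter for the build
             # phases, optionally EbuildBinpkg, and finally EbuildMerge via
             # install() unless --buildpkgonly is in effect.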
2682         def _start(self):
2683
2684                 logger = self.logger
2685                 opts = self.opts
2686                 pkg = self.pkg
2687                 settings = self.settings
2688                 world_atom = self.world_atom
2689                 root_config = pkg.root_config
2690                 tree = "porttree"
2691                 self._tree = tree
2692                 portdb = root_config.trees[tree].dbapi
2693                 settings.setcpv(pkg)
2694                 settings.configdict["pkg"]["EMERGE_FROM"] = pkg.type_name
2695                 ebuild_path = portdb.findname(self.pkg.cpv)
2696                 self._ebuild_path = ebuild_path
2697
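                     # A prefetcher may have been spawned for this package. If it is
                     # not running (never started or already finished) it is simply
                     # cancelled; if it is still running, wait for it to exit before
                     # fetching, since it may still be downloading the same distfiles.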
2698                 prefetcher = self.prefetcher
2699                 if prefetcher is None:
2700                         pass
2701                 elif not prefetcher.isAlive():
2702                         prefetcher.cancel()
2703                 elif prefetcher.poll() is None:
2704
2705                         waiting_msg = "Fetching files " + \
2706                                 "in the background. " + \
2707                                 "To view fetch progress, run `tail -f " + \
2708                                 "/var/log/emerge-fetch.log` in another " + \
2709                                 "terminal."
2710                         msg_prefix = colorize("GOOD", " * ")
2711                         from textwrap import wrap
2712                         waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
2713                                 for line in wrap(waiting_msg, 65))
2714                         if not self.background:
2715                                 writemsg(waiting_msg, noiselevel=-1)
2716
2717                         self._current_task = prefetcher
2718                         prefetcher.addExitListener(self._prefetch_exit)
2719                         return
2720
2721                 self._prefetch_exit(prefetcher)
2722
2723         def _prefetch_exit(self, prefetcher):
2724
2725                 opts = self.opts
2726                 pkg = self.pkg
2727                 settings = self.settings
2728
2729                 if opts.fetchonly:
2730                         fetcher = EbuildFetchonly(
2731                                 fetch_all=opts.fetch_all_uri,
2732                                 pkg=pkg, pretend=opts.pretend,
2733                                 settings=settings)
2734                         retval = fetcher.execute()
2735                         self.returncode = retval
2736                         self.wait()
2737                         return
2738
2739                 fetcher = EbuildFetcher(config_pool=self.config_pool,
2740                         fetchall=opts.fetch_all_uri,
2741                         fetchonly=opts.fetchonly,
2742                         background=self.background,
2743                         pkg=pkg, scheduler=self.scheduler)
2744
2745                 self._start_task(fetcher, self._fetch_exit)
2746
2747         def _fetch_exit(self, fetcher):
2748                 opts = self.opts
2749                 pkg = self.pkg
2750
2751                 fetch_failed = False
2752                 if opts.fetchonly:
2753                         fetch_failed = self._final_exit(fetcher) != os.EX_OK
2754                 else:
2755                         fetch_failed = self._default_exit(fetcher) != os.EX_OK
2756
2757                 if fetch_failed and fetcher.logfile is not None and \
2758                         os.path.exists(fetcher.logfile):
2759                         self.settings["PORTAGE_LOG_FILE"] = fetcher.logfile
2760
2761                 if not fetch_failed and fetcher.logfile is not None:
2762                         # Fetch was successful, so remove the fetch log.
2763                         try:
2764                                 os.unlink(fetcher.logfile)
2765                         except OSError:
2766                                 pass
2767
2768                 if fetch_failed or opts.fetchonly:
2769                         self.wait()
2770                         return
2771
2772                 logger = self.logger
2773                 opts = self.opts
2774                 pkg_count = self.pkg_count
2775                 scheduler = self.scheduler
2776                 settings = self.settings
2777                 features = settings.features
2778                 ebuild_path = self._ebuild_path
2779                 system_set = pkg.root_config.sets["system"]
2780
2781                 self._build_dir = EbuildBuildDir(pkg=pkg, settings=settings)
2782                 self._build_dir.lock()
2783
2784                 # Cleaning is triggered before the setup
2785                 # phase, in portage.doebuild().
2786                 msg = " === (%s of %s) Cleaning (%s::%s)" % \
2787                         (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2788                 short_msg = "emerge: (%s of %s) %s Clean" % \
2789                         (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2790                 logger.log(msg, short_msg=short_msg)
2791
2792                 # buildsyspkg: Check if we need to _force_ binary package creation
2793                 self._issyspkg = "buildsyspkg" in features and \
2794                                 system_set.findAtomForPackage(pkg) and \
2795                                 not opts.buildpkg
2796
2797                 if opts.buildpkg or self._issyspkg:
2798
2799                         self._buildpkg = True
2800
2801                         msg = " === (%s of %s) Compiling/Packaging (%s::%s)" % \
2802                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2803                         short_msg = "emerge: (%s of %s) %s Compile" % \
2804                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2805                         logger.log(msg, short_msg=short_msg)
2806
2807                 else:
2808                         msg = " === (%s of %s) Compiling/Merging (%s::%s)" % \
2809                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2810                         short_msg = "emerge: (%s of %s) %s Compile" % \
2811                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2812                         logger.log(msg, short_msg=short_msg)
2813
2814                 build = EbuildExecuter(background=self.background, pkg=pkg,
2815                         scheduler=scheduler, settings=settings)
2816                 self._start_task(build, self._build_exit)
2817
2818         def _unlock_builddir(self):
2819                 portage.elog.elog_process(self.pkg.cpv, self.settings)
2820                 self._build_dir.unlock()
2821
2822         def _build_exit(self, build):
2823                 if self._default_exit(build) != os.EX_OK:
2824                         self._unlock_builddir()
2825                         self.wait()
2826                         return
2827
2828                 opts = self.opts
2829                 buildpkg = self._buildpkg
2830
2831                 if not buildpkg:
2832                         self._final_exit(build)
2833                         self.wait()
2834                         return
2835
2836                 if self._issyspkg:
2837                         msg = ">>> This is a system package, " + \
2838                                 "let's pack a rescue tarball.\n"
2839
2840                         log_path = self.settings.get("PORTAGE_LOG_FILE")
2841                         if log_path is not None:
2842                                 log_file = open(log_path, 'a')
2843                                 try:
2844                                         log_file.write(msg)
2845                                 finally:
2846                                         log_file.close()
2847
2848                         if not self.background:
2849                                 portage.writemsg_stdout(msg, noiselevel=-1)
2850
2851                 packager = EbuildBinpkg(background=self.background, pkg=self.pkg,
2852                         scheduler=self.scheduler, settings=self.settings)
2853
2854                 self._start_task(packager, self._buildpkg_exit)
2855
2856         def _buildpkg_exit(self, packager):
2857                 """
2858                 Released build dir lock when there is a failure or
2859                 Release the build dir lock when there is a failure or
2860                 be released when merge() is called.
2861                 """
2862
2863                 if self._default_exit(packager) != os.EX_OK:
2864                         self._unlock_builddir()
2865                         self.wait()
2866                         return
2867
2868                 if self.opts.buildpkgonly:
2869                         # Need to call "clean" phase for buildpkgonly mode
2870                         portage.elog.elog_process(self.pkg.cpv, self.settings)
2871                         phase = "clean"
2872                         clean_phase = EbuildPhase(background=self.background,
2873                                 pkg=self.pkg, phase=phase,
2874                                 scheduler=self.scheduler, settings=self.settings,
2875                                 tree=self._tree)
2876                         self._start_task(clean_phase, self._clean_exit)
2877                         return
2878
2879                 # Continue holding the builddir lock until
2880                 # after the package has been installed.
2881                 self._current_task = None
2882                 self.returncode = packager.returncode
2883                 self.wait()
2884
2885         def _clean_exit(self, clean_phase):
2886                 if self._final_exit(clean_phase) != os.EX_OK or \
2887                         self.opts.buildpkgonly:
2888                         self._unlock_builddir()
2889                 self.wait()
2890
2891         def install(self):
2892                 """
2893                 Install the package and then clean up and release locks.
2894                 Only call this after the build has completed successfully
2895                 and neither fetchonly nor buildpkgonly mode is enabled.
2896                 """
2897
2898                 find_blockers = self.find_blockers
2899                 ldpath_mtimes = self.ldpath_mtimes
2900                 logger = self.logger
2901                 pkg = self.pkg
2902                 pkg_count = self.pkg_count
2903                 settings = self.settings
2904                 world_atom = self.world_atom
2905                 ebuild_path = self._ebuild_path
2906                 tree = self._tree
2907
2908                 merge = EbuildMerge(find_blockers=find_blockers,
2909                         ldpath_mtimes=ldpath_mtimes, logger=logger, pkg=pkg,
2910                         pkg_count=pkg_count, pkg_path=ebuild_path,
2911                         scheduler=self.scheduler,
2912                         settings=settings, tree=tree, world_atom=world_atom)
2913
2914                 msg = " === (%s of %s) Merging (%s::%s)" % \
2915                         (pkg_count.curval, pkg_count.maxval,
2916                         pkg.cpv, ebuild_path)
2917                 short_msg = "emerge: (%s of %s) %s Merge" % \
2918                         (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2919                 logger.log(msg, short_msg=short_msg)
2920
2921                 try:
2922                         rval = merge.execute()
2923                 finally:
2924                         self._unlock_builddir()
2925
2926                 return rval
2927
2928 class EbuildExecuter(CompositeTask):
2929
2930         __slots__ = ("pkg", "scheduler", "settings") + ("_tree",)
2931
2932         _phases = ("prepare", "configure", "compile", "test", "install")
2933
2934         _live_eclasses = frozenset([
2935                 "bzr",
2936                 "cvs",
2937                 "darcs",
2938                 "git",
2939                 "mercurial",
2940                 "subversion"
2941         ])
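             # Packages inheriting any of these eclasses fetch their sources
             # during src_unpack(), so their unpack phases are serialized by
             # the scheduler (see _setup_exit below).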
2942
2943         def _start(self):
2944                 self._tree = "porttree"
2945                 pkg = self.pkg
2946                 phase = "clean"
2947                 clean_phase = EbuildPhase(background=self.background, pkg=pkg, phase=phase,
2948                         scheduler=self.scheduler, settings=self.settings, tree=self._tree)
2949                 self._start_task(clean_phase, self._clean_phase_exit)
2950
2951         def _clean_phase_exit(self, clean_phase):
2952
2953                 if self._default_exit(clean_phase) != os.EX_OK:
2954                         self.wait()
2955                         return
2956
2957                 pkg = self.pkg
2958                 scheduler = self.scheduler
2959                 settings = self.settings
2960                 cleanup = 1
2961
2962                 # This initializes PORTAGE_LOG_FILE.
2963                 portage.prepare_build_dirs(pkg.root, settings, cleanup)
2964
2965                 setup_phase = EbuildPhase(background=self.background,
2966                         pkg=pkg, phase="setup", scheduler=scheduler,
2967                         settings=settings, tree=self._tree)
2968
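                     # The setup phase is handed to scheduler.scheduleSetup() rather
                     # than _start_task(), presumably so that the scheduler can
                     # serialize setup phases globally; the exit listener is
                     # therefore attached manually here.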
2969                 setup_phase.addExitListener(self._setup_exit)
2970                 self._current_task = setup_phase
2971                 self.scheduler.scheduleSetup(setup_phase)
2972
2973         def _setup_exit(self, setup_phase):
2974
2975                 if self._default_exit(setup_phase) != os.EX_OK:
2976                         self.wait()
2977                         return
2978
2979                 unpack_phase = EbuildPhase(background=self.background,
2980                         pkg=self.pkg, phase="unpack", scheduler=self.scheduler,
2981                         settings=self.settings, tree=self._tree)
2982
2983                 if self._live_eclasses.intersection(self.pkg.inherited):
2984                         # Serialize $DISTDIR access for live ebuilds since
2985                         # otherwise they can interfere with each other.
2986
2987                         unpack_phase.addExitListener(self._unpack_exit)
2988                         self._current_task = unpack_phase
2989                         self.scheduler.scheduleUnpack(unpack_phase)
2990
2991                 else:
2992                         self._start_task(unpack_phase, self._unpack_exit)
2993
2994         def _unpack_exit(self, unpack_phase):
2995
2996                 if self._default_exit(unpack_phase) != os.EX_OK:
2997                         self.wait()
2998                         return
2999
3000                 ebuild_phases = TaskSequence(scheduler=self.scheduler)
3001
3002                 pkg = self.pkg
3003                 phases = self._phases
3004                 eapi = pkg.metadata["EAPI"]
3005                 if eapi in ("0", "1"):
3006                         # skip src_prepare and src_configure
3007                         phases = phases[2:]
3008
3009                 for phase in phases:
3010                         ebuild_phases.add(EbuildPhase(background=self.background,
3011                                 pkg=self.pkg, phase=phase, scheduler=self.scheduler,
3012                                 settings=self.settings, tree=self._tree))
3013
3014                 self._start_task(ebuild_phases, self._default_final_exit)
3015
3016 class EbuildMetadataPhase(SubProcess):
3017
3018         """
3019         Asynchronous interface for the ebuild "depend" phase which is
3020         used to extract metadata from the ebuild.
3021         """
3022
3023         __slots__ = ("cpv", "ebuild_path", "fd_pipes", "metadata_callback",
3024                 "ebuild_mtime", "portdb", "repo_path", "settings") + \
3025                 ("_raw_metadata",)
3026
3027         _file_names = ("ebuild",)
3028         _files_dict = slot_dict_class(_file_names, prefix="")
3029         _metadata_fd = 9
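             # The "depend" phase writes one value per line on fd 9 (_metadata_fd),
             # in the same order as portage.auxdbkeys; the output is collected by
             # _output_handler and paired with the keys in _set_returncode below.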
3030
3031         def _start(self):
3032                 settings = self.settings
3033                 settings.reset()
3034                 ebuild_path = self.ebuild_path
3035                 debug = settings.get("PORTAGE_DEBUG") == "1"
3036                 master_fd = None
3037                 slave_fd = None
3038                 fd_pipes = None
3039                 if self.fd_pipes is not None:
3040                         fd_pipes = self.fd_pipes.copy()
3041                 else:
3042                         fd_pipes = {}
3043
3044                 fd_pipes.setdefault(0, sys.stdin.fileno())
3045                 fd_pipes.setdefault(1, sys.stdout.fileno())
3046                 fd_pipes.setdefault(2, sys.stderr.fileno())
3047
3048                 # flush any pending output
3049                 for fd in fd_pipes.itervalues():
3050                         if fd == sys.stdout.fileno():
3051                                 sys.stdout.flush()
3052                         if fd == sys.stderr.fileno():
3053                                 sys.stderr.flush()
3054
3055                 fd_pipes_orig = fd_pipes.copy()
3056                 self._files = self._files_dict()
3057                 files = self._files
3058
3059                 master_fd, slave_fd = os.pipe()
3060                 fcntl.fcntl(master_fd, fcntl.F_SETFL,
3061                         fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
3062
3063                 fd_pipes[self._metadata_fd] = slave_fd
3064
3065                 self._raw_metadata = []
3066                 files.ebuild = os.fdopen(master_fd, 'r')
3067                 self._reg_id = self.scheduler.register(files.ebuild.fileno(),
3068                         self._registered_events, self._output_handler)
3069                 self._registered = True
3070
3071                 retval = portage.doebuild(ebuild_path, "depend",
3072                         settings["ROOT"], settings, debug,
3073                         mydbapi=self.portdb, tree="porttree",
3074                         fd_pipes=fd_pipes, returnpid=True)
3075
3076                 os.close(slave_fd)
3077
3078                 if isinstance(retval, int):
3079                         # doebuild failed before spawning
3080                         self._unregister()
3081                         self.returncode = retval
3082                         self.wait()
3083                         return
3084
3085                 self.pid = retval[0]
3086                 portage.process.spawned_pids.remove(self.pid)
3087
3088         def _output_handler(self, fd, event):
3089
3090                 if event & PollConstants.POLLIN:
3091                         self._raw_metadata.append(self._files.ebuild.read())
3092                         if not self._raw_metadata[-1]:
3093                                 self._unregister()
3094                                 self.wait()
3095
3096                 self._unregister_if_appropriate(event)
3097                 return self._registered
3098
3099         def _set_returncode(self, wait_retval):
3100                 SubProcess._set_returncode(self, wait_retval)
3101                 if self.returncode == os.EX_OK:
3102                         metadata_lines = "".join(self._raw_metadata).splitlines()
3103                         if len(portage.auxdbkeys) != len(metadata_lines):
3104                                 # Don't trust bash's returncode if the
3105                                 # number of lines is incorrect.
3106                                 self.returncode = 1
3107                         else:
3108                                 metadata = izip(portage.auxdbkeys, metadata_lines)
3109                                 self.metadata_callback(self.cpv, self.ebuild_path,
3110                                         self.repo_path, metadata, self.ebuild_mtime)
3111
3112 class EbuildProcess(SpawnProcess):
3113
3114         __slots__ = ("phase", "pkg", "settings", "tree")
3115
3116         def _start(self):
3117                 # Don't open the log file during the clean phase since the
3118                 # open file can result in an nfs lock on $T/build.log which
3119                 # open file can result in an NFS lock on $T/build.log which
3120                 if self.phase not in ("clean", "cleanrm"):
3121                         self.logfile = self.settings.get("PORTAGE_LOG_FILE")
3122                 SpawnProcess._start(self)
3123
3124         def _pipe(self, fd_pipes):
3125                 stdout_pipe = fd_pipes.get(1)
3126                 got_pty, master_fd, slave_fd = \
3127                         portage._create_pty_or_pipe(copy_term_size=stdout_pipe)
3128                 return (master_fd, slave_fd)
3129
3130         def _spawn(self, args, **kwargs):
3131
3132                 root_config = self.pkg.root_config
3133                 tree = self.tree
3134                 mydbapi = root_config.trees[tree].dbapi
3135                 settings = self.settings
3136                 ebuild_path = settings["EBUILD"]
3137                 debug = settings.get("PORTAGE_DEBUG") == "1"
3138
3139                 rval = portage.doebuild(ebuild_path, self.phase,
3140                         root_config.root, settings, debug,
3141                         mydbapi=mydbapi, tree=tree, **kwargs)
3142
3143                 return rval
3144
3145         def _set_returncode(self, wait_retval):
3146                 SpawnProcess._set_returncode(self, wait_retval)
3147
3148                 if self.phase not in ("clean", "cleanrm"):
3149                         self.returncode = portage._doebuild_exit_status_check_and_log(
3150                                 self.settings, self.phase, self.returncode)
3151
3152                 if self.phase == "test" and self.returncode != os.EX_OK and \
3153                         "test-fail-continue" in self.settings.features:
3154                         self.returncode = os.EX_OK
3155
3156                 portage._post_phase_userpriv_perms(self.settings)
3157
3158 class EbuildPhase(CompositeTask):
3159
3160         __slots__ = ("background", "pkg", "phase",
3161                 "scheduler", "settings", "tree")
3162
3163         _post_phase_cmds = portage._post_phase_cmds
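             # Maps phase names to lists of misc-functions.sh function names that
             # are run after the phase completes (see _ebuild_exit below).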
3164
3165         def _start(self):
3166
3167                 ebuild_process = EbuildProcess(background=self.background,
3168                         pkg=self.pkg, phase=self.phase, scheduler=self.scheduler,
3169                         settings=self.settings, tree=self.tree)
3170
3171                 self._start_task(ebuild_process, self._ebuild_exit)
3172
3173         def _ebuild_exit(self, ebuild_process):
3174
3175                 if self.phase == "install":
3176                         out = None
3177                         log_path = self.settings.get("PORTAGE_LOG_FILE")
3178                         log_file = None
3179                         if self.background and log_path is not None:
3180                                 log_file = open(log_path, 'a')
3181                                 out = log_file
3182                         try:
3183                                 portage._check_build_log(self.settings, out=out)
3184                         finally:
3185                                 if log_file is not None:
3186                                         log_file.close()
3187
3188                 if self._default_exit(ebuild_process) != os.EX_OK:
3189                         self.wait()
3190                         return
3191
3192                 settings = self.settings
3193
3194                 if self.phase == "install":
3195                         portage._post_src_install_uid_fix(settings)
3196
3197                 post_phase_cmds = self._post_phase_cmds.get(self.phase)
3198                 if post_phase_cmds is not None:
3199                         post_phase = MiscFunctionsProcess(background=self.background,
3200                                 commands=post_phase_cmds, phase=self.phase, pkg=self.pkg,
3201                                 scheduler=self.scheduler, settings=settings)
3202                         self._start_task(post_phase, self._post_phase_exit)
3203                         return
3204
3205                 self.returncode = ebuild_process.returncode
3206                 self._current_task = None
3207                 self.wait()
3208
3209         def _post_phase_exit(self, post_phase):
3210                 if self._final_exit(post_phase) != os.EX_OK:
3211                         writemsg("!!! post %s failed; exiting.\n" % self.phase,
3212                                 noiselevel=-1)
3213                 self._current_task = None
3214                 self.wait()
3215                 return
3216
3217 class EbuildBinpkg(EbuildProcess):
3218         """
3219         This assumes that src_install() has successfully completed.
3220         """
3221         __slots__ = ("_binpkg_tmpfile",)
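             # The "package" phase writes the binary package to a temporary
             # <cpv>.tbz2.<pid> file under bintree.pkgdir; on success the file
             # is injected into the binary tree in _set_returncode below.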
3222
3223         def _start(self):
3224                 self.phase = "package"
3225                 self.tree = "porttree"
3226                 pkg = self.pkg
3227                 root_config = pkg.root_config
3228                 portdb = root_config.trees["porttree"].dbapi
3229                 bintree = root_config.trees["bintree"]
3230                 ebuild_path = portdb.findname(self.pkg.cpv)
3231                 settings = self.settings
3232                 debug = settings.get("PORTAGE_DEBUG") == "1"
3233
3234                 bintree.prevent_collision(pkg.cpv)
3235                 binpkg_tmpfile = os.path.join(bintree.pkgdir,
3236                         pkg.cpv + ".tbz2." + str(os.getpid()))
3237                 self._binpkg_tmpfile = binpkg_tmpfile
3238                 settings["PORTAGE_BINPKG_TMPFILE"] = binpkg_tmpfile
3239                 settings.backup_changes("PORTAGE_BINPKG_TMPFILE")
3240
3241                 try:
3242                         EbuildProcess._start(self)
3243                 finally:
3244                         settings.pop("PORTAGE_BINPKG_TMPFILE", None)
3245
3246         def _set_returncode(self, wait_retval):
3247                 EbuildProcess._set_returncode(self, wait_retval)
3248
3249                 pkg = self.pkg
3250                 bintree = pkg.root_config.trees["bintree"]
3251                 binpkg_tmpfile = self._binpkg_tmpfile
3252                 if self.returncode == os.EX_OK:
3253                         bintree.inject(pkg.cpv, filename=binpkg_tmpfile)
3254
3255 class EbuildMerge(SlotObject):
3256
3257         __slots__ = ("find_blockers", "logger", "ldpath_mtimes",
3258                 "pkg", "pkg_count", "pkg_path", "pretend",
3259                 "scheduler", "settings", "tree", "world_atom")
3260
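             # Thin synchronous wrapper around portage.merge() that also invokes
             # the world_atom callback and logs the result on success.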
3261         def execute(self):
3262                 root_config = self.pkg.root_config
3263                 settings = self.settings
3264                 retval = portage.merge(settings["CATEGORY"],
3265                         settings["PF"], settings["D"],
3266                         os.path.join(settings["PORTAGE_BUILDDIR"],
3267                         "build-info"), root_config.root, settings,
3268                         myebuild=settings["EBUILD"],
3269                         mytree=self.tree, mydbapi=root_config.trees[self.tree].dbapi,
3270                         vartree=root_config.trees["vartree"],
3271                         prev_mtimes=self.ldpath_mtimes,
3272                         scheduler=self.scheduler,
3273                         blockers=self.find_blockers)
3274
3275                 if retval == os.EX_OK:
3276                         self.world_atom(self.pkg)
3277                         self._log_success()
3278
3279                 return retval
3280
3281         def _log_success(self):
3282                 pkg = self.pkg
3283                 pkg_count = self.pkg_count
3284                 pkg_path = self.pkg_path
3285                 logger = self.logger
3286                 if "noclean" not in self.settings.features:
3287                         short_msg = "emerge: (%s of %s) %s Clean Post" % \
3288                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3289                         logger.log((" === (%s of %s) " + \
3290                                 "Post-Build Cleaning (%s::%s)") % \
3291                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path),
3292                                 short_msg=short_msg)
3293                 logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
3294                         (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
3295
3296 class PackageUninstall(AsynchronousTask):
3297
3298         __slots__ = ("ldpath_mtimes", "opts", "pkg", "scheduler", "settings")
3299
3300         def _start(self):
3301                 try:
3302                         unmerge(self.pkg.root_config, self.opts, "unmerge",
3303                                 [self.pkg.cpv], self.ldpath_mtimes, clean_world=0,
3304                                 clean_delay=0, raise_on_error=1, scheduler=self.scheduler,
3305                                 writemsg_level=self._writemsg_level)
3306                 except UninstallFailure, e:
3307                         self.returncode = e.status
3308                 else:
3309                         self.returncode = os.EX_OK
3310                 self.wait()
3311
3312         def _writemsg_level(self, msg, level=0, noiselevel=0):
3313
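                     # Console output is suppressed for background tasks (except
                     # warnings and errors when no log file is set); when a log
                     # file is set, every message is appended to it.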
3314                 log_path = self.settings.get("PORTAGE_LOG_FILE")
3315                 background = self.background
3316
3317                 if log_path is None:
3318                         if not (background and level < logging.WARNING):
3319                                 portage.util.writemsg_level(msg,
3320                                         level=level, noiselevel=noiselevel)
3321                 else:
3322                         if not background:
3323                                 portage.util.writemsg_level(msg,
3324                                         level=level, noiselevel=noiselevel)
3325
3326                         f = open(log_path, 'a')
3327                         try:
3328                                 f.write(msg)
3329                         finally:
3330                                 f.close()
3331
3332 class Binpkg(CompositeTask):
3333
3334         __slots__ = ("find_blockers",
3335                 "ldpath_mtimes", "logger", "opts",
3336                 "pkg", "pkg_count", "prefetcher", "settings", "world_atom") + \
3337                 ("_bintree", "_build_dir", "_ebuild_path", "_fetched_pkg",
3338                 "_image_dir", "_infloc", "_pkg_path", "_tree", "_verify")
3339
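             # Task flow (see the methods below): wait for any prefetcher, then
             # BinpkgFetcher, BinpkgVerifier, a clean phase, extraction of the
             # xpak metadata, a setup phase, BinpkgExtractorAsync for the image,
             # and finally EbuildMerge via install().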
3340         def _writemsg_level(self, msg, level=0, noiselevel=0):
3341
3342                 if not self.background:
3343                         portage.util.writemsg_level(msg,
3344                                 level=level, noiselevel=noiselevel)
3345
3346                 log_path = self.settings.get("PORTAGE_LOG_FILE")
3347                 if log_path is not None:
3348                         f = open(log_path, 'a')
3349                         try:
3350                                 f.write(msg)
3351                         finally:
3352                                 f.close()
3353
3354         def _start(self):
3355
3356                 pkg = self.pkg
3357                 settings = self.settings
3358                 settings.setcpv(pkg)
3359                 self._tree = "bintree"
3360                 self._bintree = self.pkg.root_config.trees[self._tree]
3361                 self._verify = not self.opts.pretend
3362
3363                 dir_path = os.path.join(settings["PORTAGE_TMPDIR"],
3364                         "portage", pkg.category, pkg.pf)
3365                 self._build_dir = EbuildBuildDir(dir_path=dir_path,
3366                         pkg=pkg, settings=settings)
3367                 self._image_dir = os.path.join(dir_path, "image")
3368                 self._infloc = os.path.join(dir_path, "build-info")
3369                 self._ebuild_path = os.path.join(self._infloc, pkg.pf + ".ebuild")
3370                 settings["EBUILD"] = self._ebuild_path
3371                 debug = settings.get("PORTAGE_DEBUG") == "1"
3372                 portage.doebuild_environment(self._ebuild_path, "setup",
3373                         settings["ROOT"], settings, debug, 1, self._bintree.dbapi)
3374                 settings.configdict["pkg"]["EMERGE_FROM"] = pkg.type_name
3375
3376                 # The prefetcher has already completed or it
3377                 # could be running now. If it's running now,
3378                 # wait for it to complete since it holds
3379                 # a lock on the file being fetched. The
3380                 # portage.locks functions are only designed
3381                 # to work between separate processes. Since
3382                 # the lock is held by the current process,
3383                 # use the scheduler and fetcher methods to
3384                 # synchronize with the fetcher.
3385                 prefetcher = self.prefetcher
3386                 if prefetcher is None:
3387                         pass
3388                 elif not prefetcher.isAlive():
3389                         prefetcher.cancel()
3390                 elif prefetcher.poll() is None:
3391
3392                         waiting_msg = ("Fetching '%s' " + \
3393                                 "in the background. " + \
3394                                 "To view fetch progress, run `tail -f " + \
3395                                 "/var/log/emerge-fetch.log` in another " + \
3396                                 "terminal.") % prefetcher.pkg_path
3397                         msg_prefix = colorize("GOOD", " * ")
3398                         from textwrap import wrap
3399                         waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
3400                                 for line in wrap(waiting_msg, 65))
3401                         if not self.background:
3402                                 writemsg(waiting_msg, noiselevel=-1)
3403
3404                         self._current_task = prefetcher
3405                         prefetcher.addExitListener(self._prefetch_exit)
3406                         return
3407
3408                 self._prefetch_exit(prefetcher)
3409
3410         def _prefetch_exit(self, prefetcher):
3411
3412                 pkg = self.pkg
3413                 pkg_count = self.pkg_count
3414                 if not (self.opts.pretend or self.opts.fetchonly):
3415                         self._build_dir.lock()
3416                         try:
3417                                 shutil.rmtree(self._build_dir.dir_path)
3418                         except EnvironmentError, e:
3419                                 if e.errno != errno.ENOENT:
3420                                         raise
3421                                 del e
3422                         portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
3423                 fetcher = BinpkgFetcher(background=self.background,
3424                         logfile=self.settings.get("PORTAGE_LOG_FILE"), pkg=self.pkg,
3425                         pretend=self.opts.pretend, scheduler=self.scheduler)
3426                 pkg_path = fetcher.pkg_path
3427                 self._pkg_path = pkg_path
3428
3429                 if self.opts.getbinpkg and self._bintree.isremote(pkg.cpv):
3430
3431                         msg = " --- (%s of %s) Fetching Binary (%s::%s)" %\
3432                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
3433                         short_msg = "emerge: (%s of %s) %s Fetch" % \
3434                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3435                         self.logger.log(msg, short_msg=short_msg)
3436                         self._start_task(fetcher, self._fetcher_exit)
3437                         return
3438
3439                 self._fetcher_exit(fetcher)
3440
3441         def _fetcher_exit(self, fetcher):
3442
3443                 # The fetcher only has a returncode when
3444                 # --getbinpkg is enabled.
3445                 if fetcher.returncode is not None:
3446                         self._fetched_pkg = True
3447                         if self._default_exit(fetcher) != os.EX_OK:
3448                                 self._unlock_builddir()
3449                                 self.wait()
3450                                 return
3451
3452                 if self.opts.pretend:
3453                         self._current_task = None
3454                         self.returncode = os.EX_OK
3455                         self.wait()
3456                         return
3457
3458                 verifier = None
3459                 if self._verify:
3460                         logfile = None
3461                         if self.background:
3462                                 logfile = self.settings.get("PORTAGE_LOG_FILE")
3463                         verifier = BinpkgVerifier(background=self.background,
3464                                 logfile=logfile, pkg=self.pkg)
3465                         self._start_task(verifier, self._verifier_exit)
3466                         return
3467
3468                 self._verifier_exit(verifier)
3469
3470         def _verifier_exit(self, verifier):
3471                 if verifier is not None and \
3472                         self._default_exit(verifier) != os.EX_OK:
3473                         self._unlock_builddir()
3474                         self.wait()
3475                         return
3476
3477                 logger = self.logger
3478                 pkg = self.pkg
3479                 pkg_count = self.pkg_count
3480                 pkg_path = self._pkg_path
3481
3482                 if self._fetched_pkg:
3483                         self._bintree.inject(pkg.cpv, filename=pkg_path)
3484
3485                 if self.opts.fetchonly:
3486                         self._current_task = None
3487                         self.returncode = os.EX_OK
3488                         self.wait()
3489                         return
3490
3491                 msg = " === (%s of %s) Merging Binary (%s::%s)" % \
3492                         (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
3493                 short_msg = "emerge: (%s of %s) %s Merge Binary" % \
3494                         (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3495                 logger.log(msg, short_msg=short_msg)
3496
3497                 phase = "clean"
3498                 settings = self.settings
3499                 ebuild_phase = EbuildPhase(background=self.background,
3500                         pkg=pkg, phase=phase, scheduler=self.scheduler,
3501                         settings=settings, tree=self._tree)
3502
3503                 self._start_task(ebuild_phase, self._clean_exit)
3504
3505         def _clean_exit(self, clean_phase):
3506                 if self._default_exit(clean_phase) != os.EX_OK:
3507                         self._unlock_builddir()
3508                         self.wait()
3509                         return
3510
3511                 dir_path = self._build_dir.dir_path
3512
3513                 try:
3514                         shutil.rmtree(dir_path)
3515                 except (IOError, OSError), e:
3516                         if e.errno != errno.ENOENT:
3517                                 raise
3518                         del e
3519
3520                 infloc = self._infloc
3521                 pkg = self.pkg
3522                 pkg_path = self._pkg_path
3523
3524                 dir_mode = 0755
3525                 for mydir in (dir_path, self._image_dir, infloc):
3526                         portage.util.ensure_dirs(mydir, uid=portage.data.portage_uid,
3527                                 gid=portage.data.portage_gid, mode=dir_mode)
3528
3529                 # This initializes PORTAGE_LOG_FILE.
3530                 portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
3531                 self._writemsg_level(">>> Extracting info\n")
3532
3533                 pkg_xpak = portage.xpak.tbz2(self._pkg_path)
3534                 check_missing_metadata = ("CATEGORY", "PF")
3535                 missing_metadata = set()
3536                 for k in check_missing_metadata:
3537                         v = pkg_xpak.getfile(k)
3538                         if not v:
3539                                 missing_metadata.add(k)
3540
3541                 pkg_xpak.unpackinfo(infloc)
3542                 for k in missing_metadata:
3543                         if k == "CATEGORY":
3544                                 v = pkg.category
3545                         elif k == "PF":
3546                                 v = pkg.pf
3547                         else:
3548                                 continue
3549
3550                         f = open(os.path.join(infloc, k), 'wb')
3551                         try:
3552                                 f.write(v + "\n")
3553                         finally:
3554                                 f.close()
3555
3556                 # Store the md5sum in the vdb.
3557                 f = open(os.path.join(infloc, "BINPKGMD5"), "w")
3558                 try:
3559                         f.write(str(portage.checksum.perform_md5(pkg_path)) + "\n")
3560                 finally:
3561                         f.close()
3562
3563                 # This gives bashrc users an opportunity to do various things
3564                 # such as remove binary packages after they're installed.
3565                 settings = self.settings
3566                 settings.setcpv(self.pkg)
3567                 settings["PORTAGE_BINPKG_FILE"] = pkg_path
3568                 settings.backup_changes("PORTAGE_BINPKG_FILE")
3569
3570                 phase = "setup"
3571                 setup_phase = EbuildPhase(background=self.background,
3572                         pkg=self.pkg, phase=phase, scheduler=self.scheduler,
3573                         settings=settings, tree=self._tree)
3574
3575                 setup_phase.addExitListener(self._setup_exit)
3576                 self._current_task = setup_phase
3577                 self.scheduler.scheduleSetup(setup_phase)
3578
3579         def _setup_exit(self, setup_phase):
3580                 if self._default_exit(setup_phase) != os.EX_OK:
3581                         self._unlock_builddir()
3582                         self.wait()
3583                         return
3584
3585                 extractor = BinpkgExtractorAsync(background=self.background,
3586                         image_dir=self._image_dir,
3587                         pkg=self.pkg, pkg_path=self._pkg_path, scheduler=self.scheduler)
3588                 self._writemsg_level(">>> Extracting %s\n" % self.pkg.cpv)
3589                 self._start_task(extractor, self._extractor_exit)
3590
3591         def _extractor_exit(self, extractor):
3592                 if self._final_exit(extractor) != os.EX_OK:
3593                         self._unlock_builddir()
3594                         writemsg("!!! Error Extracting '%s'\n" % self._pkg_path,
3595                                 noiselevel=-1)
3596                 self.wait()
3597
3598         def _unlock_builddir(self):
3599                 if self.opts.pretend or self.opts.fetchonly:
3600                         return
3601                 portage.elog.elog_process(self.pkg.cpv, self.settings)
3602                 self._build_dir.unlock()
3603
3604         def install(self):
3605
3606                 # This gives bashrc users an opportunity to do various things
3607                 # such as remove binary packages after they're installed.
3608                 settings = self.settings
3609                 settings["PORTAGE_BINPKG_FILE"] = self._pkg_path
3610                 settings.backup_changes("PORTAGE_BINPKG_FILE")
3611
3612                 merge = EbuildMerge(find_blockers=self.find_blockers,
3613                         ldpath_mtimes=self.ldpath_mtimes, logger=self.logger,
3614                         pkg=self.pkg, pkg_count=self.pkg_count,
3615                         pkg_path=self._pkg_path, scheduler=self.scheduler,
3616                         settings=settings, tree=self._tree, world_atom=self.world_atom)
3617
3618                 try:
3619                         retval = merge.execute()
3620                 finally:
3621                         settings.pop("PORTAGE_BINPKG_FILE", None)
3622                         self._unlock_builddir()
3623                 return retval
3624
3625 class BinpkgFetcher(SpawnProcess):
3626
3627         __slots__ = ("pkg", "pretend",
3628                 "locked", "pkg_path", "_lock_obj")
3629
3630         def __init__(self, **kwargs):
3631                 SpawnProcess.__init__(self, **kwargs)
3632                 pkg = self.pkg
3633                 self.pkg_path = pkg.root_config.trees["bintree"].getname(pkg.cpv)
3634
3635         def _start(self):
3636
3637                 if self.cancelled:
3638                         return
3639
3640                 pkg = self.pkg
3641                 pretend = self.pretend
3642                 bintree = pkg.root_config.trees["bintree"]
3643                 settings = bintree.settings
3644                 use_locks = "distlocks" in settings.features
3645                 pkg_path = self.pkg_path
3646
3647                 if not pretend:
3648                         portage.util.ensure_dirs(os.path.dirname(pkg_path))
3649                         if use_locks:
3650                                 self.lock()
3651                 exists = os.path.exists(pkg_path)
3652                 resume = exists and os.path.basename(pkg_path) in bintree.invalids
3653                 if not (pretend or resume):
3654                         # Remove existing file or broken symlink.
3655                         try:
3656                                 os.unlink(pkg_path)
3657                         except OSError:
3658                                 pass
3659
3660                 # urljoin doesn't work correctly with
3661                 # unrecognized protocols like sftp
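                # (With an unrecognized scheme such as sftp, urljoin() may
                # simply return the relative part unchanged, so the URI is
                # assembled by hand below.)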
3662                 if bintree._remote_has_index:
3663                         rel_uri = bintree._remotepkgs[pkg.cpv].get("PATH")
3664                         if not rel_uri:
3665                                 rel_uri = pkg.cpv + ".tbz2"
3666                         uri = bintree._remote_base_uri.rstrip("/") + \
3667                                 "/" + rel_uri.lstrip("/")
3668                 else:
3669                         uri = settings["PORTAGE_BINHOST"].rstrip("/") + \
3670                                 "/" + pkg.pf + ".tbz2"
3671
3672                 if pretend:
3673                         portage.writemsg_stdout("\n%s\n" % uri, noiselevel=-1)
3674                         self.returncode = os.EX_OK
3675                         self.wait()
3676                         return
3677
3678                 protocol = urlparse.urlparse(uri)[0]
3679                 fcmd_prefix = "FETCHCOMMAND"
3680                 if resume:
3681                         fcmd_prefix = "RESUMECOMMAND"
3682                 fcmd = settings.get(fcmd_prefix + "_" + protocol.upper())
3683                 if not fcmd:
3684                         fcmd = settings.get(fcmd_prefix)
3685
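                # ${DISTDIR}, ${URI} and ${FILE} are substituted into the
                # configured (RESUME|FETCH)COMMAND via varexpand() below,
                # e.g. a value along the lines of
                # 'wget -O "${DISTDIR}/${FILE}" "${URI}"' (an illustrative
                # example, not necessarily the configured default).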
3686                 fcmd_vars = {
3687                         "DISTDIR" : os.path.dirname(pkg_path),
3688                         "URI"     : uri,
3689                         "FILE"    : os.path.basename(pkg_path)
3690                 }
3691
3692                 fetch_env = dict(settings.iteritems())
3693                 fetch_args = [portage.util.varexpand(x, mydict=fcmd_vars) \
3694                         for x in shlex.split(fcmd)]
3695
3696                 if self.fd_pipes is None:
3697                         self.fd_pipes = {}
3698                 fd_pipes = self.fd_pipes
3699
3700                 # Redirect all output to stdout since some fetchers like
3701                 # wget pollute stderr (if portage detects a problem then it
3702                 # can send its own message to stderr).
3703                 fd_pipes.setdefault(0, sys.stdin.fileno())
3704                 fd_pipes.setdefault(1, sys.stdout.fileno())
3705                 fd_pipes.setdefault(2, sys.stdout.fileno())
3706
3707                 self.args = fetch_args
3708                 self.env = fetch_env
3709                 SpawnProcess._start(self)
3710
3711         def _set_returncode(self, wait_retval):
3712                 SpawnProcess._set_returncode(self, wait_retval)
3713                 if self.returncode == os.EX_OK:
3714                         # If possible, update the mtime to match the remote package if
3715                         # the fetcher didn't already do it automatically.
3716                         bintree = self.pkg.root_config.trees["bintree"]
3717                         if bintree._remote_has_index:
3718                                 remote_mtime = bintree._remotepkgs[self.pkg.cpv].get("MTIME")
3719                                 if remote_mtime is not None:
3720                                         try:
3721                                                 remote_mtime = long(remote_mtime)
3722                                         except ValueError:
3723                                                 pass
3724                                         else:
3725                                                 try:
3726                                                         local_mtime = long(os.stat(self.pkg_path).st_mtime)
3727                                                 except OSError:
3728                                                         pass
3729                                                 else:
3730                                                         if remote_mtime != local_mtime:
3731                                                                 try:
3732                                                                         os.utime(self.pkg_path,
3733                                                                                 (remote_mtime, remote_mtime))
3734                                                                 except OSError:
3735                                                                         pass
3736
3737                 if self.locked:
3738                         self.unlock()
3739
3740         def lock(self):
3741                 """
3742                 This raises an AlreadyLocked exception if lock() is called
3743                 while a lock is already held. In order to avoid this, call
3744                 unlock() first, or check the "locked" attribute before
3745                 calling lock().
3746                 """
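                # Typical usage (an illustrative sketch; "fetcher" stands
                # for a BinpkgFetcher instance):
                #
                #     if not fetcher.locked:
                #         fetcher.lock()
                #     try:
                #         ...  # work with fetcher.pkg_path
                #     finally:
                #         fetcher.unlock()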
3747                 if self._lock_obj is not None:
3748                         raise self.AlreadyLocked((self._lock_obj,))
3749
3750                 self._lock_obj = portage.locks.lockfile(
3751                         self.pkg_path, wantnewlockfile=1)
3752                 self.locked = True
3753
3754         class AlreadyLocked(portage.exception.PortageException):
3755                 pass
3756
3757         def unlock(self):
3758                 if self._lock_obj is None:
3759                         return
3760                 portage.locks.unlockfile(self._lock_obj)
3761                 self._lock_obj = None
3762                 self.locked = False
3763
3764 class BinpkgVerifier(AsynchronousTask):
3765         __slots__ = ("logfile", "pkg",)
3766
3767         def _start(self):
3768                 """
3769                 Note: Unlike a normal AsynchronousTask.start() method,
3770                 this one does all of its work synchronously. The returncode
3771                 attribute will be set before it returns.
3772                 """
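                # In other words, callers (such as BinpkgPrefetcher below)
                # can read self.returncode immediately after start() returns.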
3773
3774                 pkg = self.pkg
3775                 root_config = pkg.root_config
3776                 bintree = root_config.trees["bintree"]
3777                 rval = os.EX_OK
3778                 stdout_orig = sys.stdout
3779                 stderr_orig = sys.stderr
3780                 log_file = None
3781                 if self.background and self.logfile is not None:
3782                         log_file = open(self.logfile, 'a')
3783                 try:
3784                         if log_file is not None:
3785                                 sys.stdout = log_file
3786                                 sys.stderr = log_file
3787                         try:
3788                                 bintree.digestCheck(pkg)
3789                         except portage.exception.FileNotFound:
3790                                 writemsg("!!! Fetching Binary failed " + \
3791                                         "for '%s'\n" % pkg.cpv, noiselevel=-1)
3792                                 rval = 1
3793                         except portage.exception.DigestException, e:
3794                                 writemsg("\n!!! Digest verification failed:\n",
3795                                         noiselevel=-1)
3796                                 writemsg("!!! %s\n" % e.value[0],
3797                                         noiselevel=-1)
3798                                 writemsg("!!! Reason: %s\n" % e.value[1],
3799                                         noiselevel=-1)
3800                                 writemsg("!!! Got: %s\n" % e.value[2],
3801                                         noiselevel=-1)
3802                                 writemsg("!!! Expected: %s\n" % e.value[3],
3803                                         noiselevel=-1)
3804                                 rval = 1
3805                         if rval != os.EX_OK:
3806                                 pkg_path = bintree.getname(pkg.cpv)
3807                                 head, tail = os.path.split(pkg_path)
3808                                 temp_filename = portage._checksum_failure_temp_file(head, tail)
3809                                 writemsg("File renamed to '%s'\n" % (temp_filename,),
3810                                         noiselevel=-1)
3811                 finally:
3812                         sys.stdout = stdout_orig
3813                         sys.stderr = stderr_orig
3814                         if log_file is not None:
3815                                 log_file.close()
3816
3817                 self.returncode = rval
3818                 self.wait()
3819
3820 class BinpkgPrefetcher(CompositeTask):
3821
3822         __slots__ = ("pkg",) + \
3823                 ("pkg_path", "_bintree",)
3824
3825         def _start(self):
3826                 self._bintree = self.pkg.root_config.trees["bintree"]
3827                 fetcher = BinpkgFetcher(background=self.background,
3828                         logfile=self.scheduler.fetch.log_file, pkg=self.pkg,
3829                         scheduler=self.scheduler)
3830                 self.pkg_path = fetcher.pkg_path
3831                 self._start_task(fetcher, self._fetcher_exit)
3832
3833         def _fetcher_exit(self, fetcher):
3834
3835                 if self._default_exit(fetcher) != os.EX_OK:
3836                         self.wait()
3837                         return
3838
3839                 verifier = BinpkgVerifier(background=self.background,
3840                         logfile=self.scheduler.fetch.log_file, pkg=self.pkg)
3841                 self._start_task(verifier, self._verifier_exit)
3842
3843         def _verifier_exit(self, verifier):
3844                 if self._default_exit(verifier) != os.EX_OK:
3845                         self.wait()
3846                         return
3847
3848                 self._bintree.inject(self.pkg.cpv, filename=self.pkg_path)
3849
3850                 self._current_task = None
3851                 self.returncode = os.EX_OK
3852                 self.wait()
3853
3854 class BinpkgExtractorAsync(SpawnProcess):
3855
3856         __slots__ = ("image_dir", "pkg", "pkg_path")
3857
3858         _shell_binary = portage.const.BASH_BINARY
3859
3860         def _start(self):
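                # Decompress the package with bzip2 and unpack the resulting
                # tar archive into the image directory; both paths are
                # shell-quoted so that whitespace is handled safely.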
3861                 self.args = [self._shell_binary, "-c",
3862                         "bzip2 -dqc -- %s | tar -xp -C %s -f -" % \
3863                         (portage._shell_quote(self.pkg_path),
3864                         portage._shell_quote(self.image_dir))]
3865
3866                 self.env = self.pkg.root_config.settings.environ()
3867                 SpawnProcess._start(self)
3868
3869 class MergeListItem(CompositeTask):
3870
3871         """
3872         TODO: For parallel scheduling, everything here needs asynchronous
3873         execution support (start, poll, and wait methods).
3874         """
3875
3876         __slots__ = ("args_set",
3877                 "binpkg_opts", "build_opts", "config_pool", "emerge_opts",
3878                 "find_blockers", "logger", "mtimedb", "pkg",
3879                 "pkg_count", "pkg_to_replace", "prefetcher",
3880                 "settings", "statusMessage", "world_atom") + \
3881                 ("_install_task",)
3882
3883         def _start(self):
3884
3885                 pkg = self.pkg
3886                 build_opts = self.build_opts
3887
3888                 if pkg.installed:
3889                         # uninstall, executed by self.merge()
3890                         self.returncode = os.EX_OK
3891                         self.wait()
3892                         return
3893
3894                 args_set = self.args_set
3895                 find_blockers = self.find_blockers
3896                 logger = self.logger
3897                 mtimedb = self.mtimedb
3898                 pkg_count = self.pkg_count
3899                 scheduler = self.scheduler
3900                 settings = self.settings
3901                 world_atom = self.world_atom
3902                 ldpath_mtimes = mtimedb["ldpath"]
3903
3904                 action_desc = "Emerging"
3905                 preposition = "for"
3906                 if pkg.type_name == "binary":
3907                         action_desc += " binary"
3908
3909                 if build_opts.fetchonly:
3910                         action_desc = "Fetching"
3911
3912                 msg = "%s (%s of %s) %s" % \
3913                         (action_desc,
3914                         colorize("MERGE_LIST_PROGRESS", str(pkg_count.curval)),
3915                         colorize("MERGE_LIST_PROGRESS", str(pkg_count.maxval)),
3916                         colorize("GOOD", pkg.cpv))
3917
3918                 portdb = pkg.root_config.trees["porttree"].dbapi
3919                 portdir_repo_name = portdb._repository_map.get(portdb.porttree_root)
3920                 if portdir_repo_name:
3921                         pkg_repo_name = pkg.metadata.get("repository")
3922                         if pkg_repo_name != portdir_repo_name:
3923                                 if not pkg_repo_name:
3924                                         pkg_repo_name = "unknown repo"
3925                                 msg += " from %s" % pkg_repo_name
3926
3927                 if pkg.root != "/":
3928                         msg += " %s %s" % (preposition, pkg.root)
3929
3930                 if not build_opts.pretend:
3931                         self.statusMessage(msg)
3932                         logger.log(" >>> emerge (%s of %s) %s to %s" % \
3933                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
3934
3935                 if pkg.type_name == "ebuild":
3936
3937                         build = EbuildBuild(args_set=args_set,
3938                                 background=self.background,
3939                                 config_pool=self.config_pool,
3940                                 find_blockers=find_blockers,
3941                                 ldpath_mtimes=ldpath_mtimes, logger=logger,
3942                                 opts=build_opts, pkg=pkg, pkg_count=pkg_count,
3943                                 prefetcher=self.prefetcher, scheduler=scheduler,
3944                                 settings=settings, world_atom=world_atom)
3945
3946                         self._install_task = build
3947                         self._start_task(build, self._default_final_exit)
3948                         return
3949
3950                 elif pkg.type_name == "binary":
3951
3952                         binpkg = Binpkg(background=self.background,
3953                                 find_blockers=find_blockers,
3954                                 ldpath_mtimes=ldpath_mtimes, logger=logger,
3955                                 opts=self.binpkg_opts, pkg=pkg, pkg_count=pkg_count,
3956                                 prefetcher=self.prefetcher, settings=settings,
3957                                 scheduler=scheduler, world_atom=world_atom)
3958
3959                         self._install_task = binpkg
3960                         self._start_task(binpkg, self._default_final_exit)
3961                         return
3962
3963         def _poll(self):
3964                 self._install_task.poll()
3965                 return self.returncode
3966
3967         def _wait(self):
3968                 self._install_task.wait()
3969                 return self.returncode
3970
3971         def merge(self):
3972
3973                 pkg = self.pkg
3974                 build_opts = self.build_opts
3975                 find_blockers = self.find_blockers
3976                 logger = self.logger
3977                 mtimedb = self.mtimedb
3978                 pkg_count = self.pkg_count
3979                 prefetcher = self.prefetcher
3980                 scheduler = self.scheduler
3981                 settings = self.settings
3982                 world_atom = self.world_atom
3983                 ldpath_mtimes = mtimedb["ldpath"]
3984
3985                 if pkg.installed:
3986                         if not (build_opts.buildpkgonly or \
3987                                 build_opts.fetchonly or build_opts.pretend):
3988
3989                                 uninstall = PackageUninstall(background=self.background,
3990                                         ldpath_mtimes=ldpath_mtimes, opts=self.emerge_opts,
3991                                         pkg=pkg, scheduler=scheduler, settings=settings)
3992
3993                                 uninstall.start()
3994                                 retval = uninstall.wait()
3995                                 if retval != os.EX_OK:
3996                                         return retval
3997                         return os.EX_OK
3998
3999                 if build_opts.fetchonly or \
4000                         build_opts.buildpkgonly:
4001                         return self.returncode
4002
4003                 retval = self._install_task.install()
4004                 return retval
4005
4006 class PackageMerge(AsynchronousTask):
4007         """
4008         TODO: Implement asynchronous merge so that the scheduler can
4009         run while a merge is executing.
4010         """
4011
4012         __slots__ = ("merge",)
4013
4014         def _start(self):
4015
4016                 pkg = self.merge.pkg
4017                 pkg_count = self.merge.pkg_count
4018
4019                 if pkg.installed:
4020                         action_desc = "Uninstalling"
4021                         preposition = "from"
4022                 else:
4023                         action_desc = "Installing"
4024                         preposition = "to"
4025
4026                 msg = "%s %s" % (action_desc, colorize("GOOD", pkg.cpv))
4027
4028                 if pkg.root != "/":
4029                         msg += " %s %s" % (preposition, pkg.root)
4030
4031                 if not self.merge.build_opts.fetchonly and \
4032                         not self.merge.build_opts.pretend and \
4033                         not self.merge.build_opts.buildpkgonly:
4034                         self.merge.statusMessage(msg)
4035
4036                 self.returncode = self.merge.merge()
4037                 self.wait()
4038
4039 class DependencyArg(object):
4040         def __init__(self, arg=None, root_config=None):
4041                 self.arg = arg
4042                 self.root_config = root_config
4043
4044         def __str__(self):
4045                 return str(self.arg)
4046
4047 class AtomArg(DependencyArg):
4048         def __init__(self, atom=None, **kwargs):
4049                 DependencyArg.__init__(self, **kwargs)
4050                 self.atom = atom
4051                 if not isinstance(self.atom, portage.dep.Atom):
4052                         self.atom = portage.dep.Atom(self.atom)
4053                 self.set = (self.atom, )
4054
4055 class PackageArg(DependencyArg):
4056         def __init__(self, package=None, **kwargs):
4057                 DependencyArg.__init__(self, **kwargs)
4058                 self.package = package
4059                 self.atom = portage.dep.Atom("=" + package.cpv)
4060                 self.set = (self.atom, )
4061
4062 class SetArg(DependencyArg):
4063         def __init__(self, set=None, **kwargs):
4064                 DependencyArg.__init__(self, **kwargs)
4065                 self.set = set
4066                 self.name = self.arg[len(SETPREFIX):]
4067
4068 class Dependency(SlotObject):
4069         __slots__ = ("atom", "blocker", "depth",
4070                 "parent", "onlydeps", "priority", "root")
4071         def __init__(self, **kwargs):
4072                 SlotObject.__init__(self, **kwargs)
4073                 if self.priority is None:
4074                         self.priority = DepPriority()
4075                 if self.depth is None:
4076                         self.depth = 0
4077
4078 class BlockerCache(portage.cache.mappings.MutableMapping):
4079         """This caches blockers of installed packages so that dep_check does not
4080         have to be done for every single installed package on every invocation of
4081         emerge.  The cache is invalidated whenever it is detected that something
4082         has changed that might alter the results of dep_check() calls:
4083                 1) the set of installed packages (including COUNTER) has changed
4084                 2) the old-style virtuals have changed
4085         """
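        # Typical usage, condensed from BlockerDB.findInstalledBlockers()
        # below (an illustrative sketch, not a drop-in recipe):
        #
        #     blocker_cache = BlockerCache(myroot, vardb)
        #     blocker_cache[pkg.cpv] = blocker_cache.BlockerData(
        #             counter, blocker_atoms)
        #     cached_atoms = blocker_cache[pkg.cpv].atoms
        #     blocker_cache.flush()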
4086
4087         # Number of uncached packages to trigger cache update, since
4088         # it's wasteful to update it for every vdb change.
4089         _cache_threshold = 5
4090
4091         class BlockerData(object):
4092
4093                 __slots__ = ("__weakref__", "atoms", "counter")
4094
4095                 def __init__(self, counter, atoms):
4096                         self.counter = counter
4097                         self.atoms = atoms
4098
4099         def __init__(self, myroot, vardb):
4100                 self._vardb = vardb
4101                 self._virtuals = vardb.settings.getvirtuals()
4102                 self._cache_filename = os.path.join(myroot,
4103                         portage.CACHE_PATH.lstrip(os.path.sep), "vdb_blockers.pickle")
4104                 self._cache_version = "1"
4105                 self._cache_data = None
4106                 self._modified = set()
4107                 self._load()
4108
4109         def _load(self):
4110                 try:
4111                         f = open(self._cache_filename, mode='rb')
4112                         mypickle = pickle.Unpickler(f)
4113                         try:
4114                                 mypickle.find_global = None
4115                         except AttributeError:
4116                                 # TODO: If py3k, override Unpickler.find_class().
4117                                 pass
4118                         self._cache_data = mypickle.load()
4119                         f.close()
4120                         del f
4121                 except (IOError, OSError, EOFError, ValueError, pickle.UnpicklingError), e:
4122                         if isinstance(e, pickle.UnpicklingError):
4123                                 writemsg("!!! Error loading '%s': %s\n" % \
4124                                         (self._cache_filename, str(e)), noiselevel=-1)
4125                         del e
4126
4127                 cache_valid = self._cache_data and \
4128                         isinstance(self._cache_data, dict) and \
4129                         self._cache_data.get("version") == self._cache_version and \
4130                         isinstance(self._cache_data.get("blockers"), dict)
4131                 if cache_valid:
4132                         # Validate all the atoms and counters so that
4133                         # corruption is detected as soon as possible.
4134                         invalid_items = set()
4135                         for k, v in self._cache_data["blockers"].iteritems():
4136                                 if not isinstance(k, basestring):
4137                                         invalid_items.add(k)
4138                                         continue
4139                                 try:
4140                                         if portage.catpkgsplit(k) is None:
4141                                                 invalid_items.add(k)
4142                                                 continue
4143                                 except portage.exception.InvalidData:
4144                                         invalid_items.add(k)
4145                                         continue
4146                                 if not isinstance(v, tuple) or \
4147                                         len(v) != 2:
4148                                         invalid_items.add(k)
4149                                         continue
4150                                 counter, atoms = v
4151                                 if not isinstance(counter, (int, long)):
4152                                         invalid_items.add(k)
4153                                         continue
4154                                 if not isinstance(atoms, (list, tuple)):
4155                                         invalid_items.add(k)
4156                                         continue
4157                                 invalid_atom = False
4158                                 for atom in atoms:
4159                                         if not isinstance(atom, basestring):
4160                                                 invalid_atom = True
4161                                                 break
4162                                         if atom[:1] != "!" or \
4163                                                 not portage.isvalidatom(
4164                                                 atom, allow_blockers=True):
4165                                                 invalid_atom = True
4166                                                 break
4167                                 if invalid_atom:
4168                                         invalid_items.add(k)
4169                                         continue
4170
4171                         for k in invalid_items:
4172                                 del self._cache_data["blockers"][k]
4173                         if not self._cache_data["blockers"]:
4174                                 cache_valid = False
4175
4176                 if not cache_valid:
4177                         self._cache_data = {"version":self._cache_version}
4178                         self._cache_data["blockers"] = {}
4179                         self._cache_data["virtuals"] = self._virtuals
4180                 self._modified.clear()
4181
4182         def flush(self):
4183                 """If the current user has permission and the internal blocker cache
4184                 has been updated, save it to disk and mark it unmodified.  This is called
4185                 by emerge after it has processed blockers for all installed packages.
4186                 Currently, the cache is only written if the user has superuser
4187                 privileges (since that's required to obtain a lock), but all users
4188                 have read access and benefit from faster blocker lookups (as long as
4189                 the entire cache is still valid).  The cache is stored as a pickled
4190                 dict object with the following format:
4191
4192                 {
4193                         version : "1",
4194                         "blockers" : {cpv1:(counter,(atom1, atom2...)), cpv2...},
4195                         "virtuals" : vardb.settings.getvirtuals()
4196                 }
4197                 """
4198                 if len(self._modified) >= self._cache_threshold and \
4199                         secpass >= 2:
4200                         try:
4201                                 f = portage.util.atomic_ofstream(self._cache_filename, mode='wb')
4202                                 pickle.dump(self._cache_data, f, protocol=2)
4203                                 f.close()
4204                                 portage.util.apply_secpass_permissions(
4205                                         self._cache_filename, gid=portage.portage_gid, mode=0644)
4206                         except (IOError, OSError), e:
4207                                 pass
4208                         self._modified.clear()
4209
4210         def __setitem__(self, cpv, blocker_data):
4211                 """
4212                 Update the cache and mark it as modified for a future call to
4213                 self.flush().
4214
4215                 @param cpv: Package for which to cache blockers.
4216                 @type cpv: String
4217                 @param blocker_data: An object with counter and atoms attributes.
4218                 @type blocker_data: BlockerData
4219                 """
4220                 self._cache_data["blockers"][cpv] = \
4221                         (blocker_data.counter, tuple(str(x) for x in blocker_data.atoms))
4222                 self._modified.add(cpv)
4223
4224         def __iter__(self):
4225                 if self._cache_data is None:
4226                         # triggered by python-trace
4227                         return iter([])
4228                 return iter(self._cache_data["blockers"])
4229
4230         def __delitem__(self, cpv):
4231                 del self._cache_data["blockers"][cpv]
4232
4233         def __getitem__(self, cpv):
4234                 """
4235                 @rtype: BlockerData
4236                 @returns: An object with counter and atoms attributes.
4237                 """
4238                 return self.BlockerData(*self._cache_data["blockers"][cpv])
4239
4240 class BlockerDB(object):
4241
4242         def __init__(self, root_config):
4243                 self._root_config = root_config
4244                 self._vartree = root_config.trees["vartree"]
4245                 self._portdb = root_config.trees["porttree"].dbapi
4246
4247                 self._dep_check_trees = None
4248                 self._fake_vartree = None
4249
4250         def _get_fake_vartree(self, acquire_lock=0):
4251                 fake_vartree = self._fake_vartree
4252                 if fake_vartree is None:
4253                         fake_vartree = FakeVartree(self._root_config,
4254                                 acquire_lock=acquire_lock)
4255                         self._fake_vartree = fake_vartree
4256                         self._dep_check_trees = { self._vartree.root : {
4257                                 "porttree"    :  fake_vartree,
4258                                 "vartree"     :  fake_vartree,
4259                         }}
4260                 else:
4261                         fake_vartree.sync(acquire_lock=acquire_lock)
4262                 return fake_vartree
4263
4264         def findInstalledBlockers(self, new_pkg, acquire_lock=0):
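                # Given a package that is about to be merged, return the set
                # of installed packages that block it (in either direction),
                # using BlockerCache to avoid repeating dep_check() for every
                # vdb entry on each invocation.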
4265                 blocker_cache = BlockerCache(self._vartree.root, self._vartree.dbapi)
4266                 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
4267                 settings = self._vartree.settings
4268                 stale_cache = set(blocker_cache)
4269                 fake_vartree = self._get_fake_vartree(acquire_lock=acquire_lock)
4270                 dep_check_trees = self._dep_check_trees
4271                 vardb = fake_vartree.dbapi
4272                 installed_pkgs = list(vardb)
4273
4274                 for inst_pkg in installed_pkgs:
4275                         stale_cache.discard(inst_pkg.cpv)
4276                         cached_blockers = blocker_cache.get(inst_pkg.cpv)
4277                         if cached_blockers is not None and \
4278                                 cached_blockers.counter != long(inst_pkg.metadata["COUNTER"]):
4279                                 cached_blockers = None
4280                         if cached_blockers is not None:
4281                                 blocker_atoms = cached_blockers.atoms
4282                         else:
4283                                 # Use aux_get() to trigger FakeVartree global
4284                                 # updates on *DEPEND when appropriate.
4285                                 depstr = " ".join(vardb.aux_get(inst_pkg.cpv, dep_keys))
4286                                 try:
4287                                         portage.dep._dep_check_strict = False
4288                                         success, atoms = portage.dep_check(depstr,
4289                                                 vardb, settings, myuse=inst_pkg.use.enabled,
4290                                                 trees=dep_check_trees, myroot=inst_pkg.root)
4291                                 finally:
4292                                         portage.dep._dep_check_strict = True
4293                                 if not success:
4294                                         pkg_location = os.path.join(inst_pkg.root,
4295                                                 portage.VDB_PATH, inst_pkg.category, inst_pkg.pf)
4296                                         portage.writemsg("!!! %s/*DEPEND: %s\n" % \
4297                                                 (pkg_location, atoms), noiselevel=-1)
4298                                         continue
4299
4300                                 blocker_atoms = [atom for atom in atoms \
4301                                         if atom.startswith("!")]
4302                                 blocker_atoms.sort()
4303                                 counter = long(inst_pkg.metadata["COUNTER"])
4304                                 blocker_cache[inst_pkg.cpv] = \
4305                                         blocker_cache.BlockerData(counter, blocker_atoms)
4306                 for cpv in stale_cache:
4307                         del blocker_cache[cpv]
4308                 blocker_cache.flush()
4309
4310                 blocker_parents = digraph()
4311                 blocker_atoms = []
4312                 for pkg in installed_pkgs:
4313                         for blocker_atom in blocker_cache[pkg.cpv].atoms:
4314                                 blocker_atom = blocker_atom.lstrip("!")
4315                                 blocker_atoms.append(blocker_atom)
4316                                 blocker_parents.add(blocker_atom, pkg)
4317
4318                 blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
4319                 blocking_pkgs = set()
4320                 for atom in blocker_atoms.iterAtomsForPackage(new_pkg):
4321                         blocking_pkgs.update(blocker_parents.parent_nodes(atom))
4322
4323                 # Check for blockers in the other direction.
4324                 depstr = " ".join(new_pkg.metadata[k] for k in dep_keys)
4325                 try:
4326                         portage.dep._dep_check_strict = False
4327                         success, atoms = portage.dep_check(depstr,
4328                                 vardb, settings, myuse=new_pkg.use.enabled,
4329                                 trees=dep_check_trees, myroot=new_pkg.root)
4330                 finally:
4331                         portage.dep._dep_check_strict = True
4332                 if not success:
4333                         # We should never get this far with invalid deps.
4334                         show_invalid_depstring_notice(new_pkg, depstr, atoms)
4335                         assert False
4336
4337                 blocker_atoms = [atom.lstrip("!") for atom in atoms \
4338                         if atom[:1] == "!"]
4339                 if blocker_atoms:
4340                         blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
4341                         for inst_pkg in installed_pkgs:
4342                                 try:
4343                                         blocker_atoms.iterAtomsForPackage(inst_pkg).next()
4344                                 except (portage.exception.InvalidDependString, StopIteration):
4345                                         continue
4346                                 blocking_pkgs.add(inst_pkg)
4347
4348                 return blocking_pkgs
4349
4350 def show_invalid_depstring_notice(parent_node, depstring, error_msg):
4351
4352         msg1 = "\n\n!!! Invalid or corrupt dependency specification: " + \
4353                 "\n\n%s\n\n%s\n\n%s\n\n" % (error_msg, parent_node, depstring)
4354         p_type, p_root, p_key, p_status = parent_node
4355         msg = []
4356         if p_status == "nomerge":
4357                 category, pf = portage.catsplit(p_key)
4358                 pkg_location = os.path.join(p_root, portage.VDB_PATH, category, pf)
4359                 msg.append("Portage is unable to process the dependencies of the ")
4360                 msg.append("'%s' package. " % p_key)
4361                 msg.append("In order to correct this problem, the package ")
4362                 msg.append("should be uninstalled, reinstalled, or upgraded. ")
4363                 msg.append("As a temporary workaround, the --nodeps option can ")
4364                 msg.append("be used to ignore all dependencies.  For reference, ")
4365                 msg.append("the problematic dependencies can be found in the ")
4366                 msg.append("*DEPEND files located in '%s/'." % pkg_location)
4367         else:
4368                 msg.append("This package cannot be installed. ")
4369                 msg.append("Please notify the '%s' package maintainer " % p_key)
4370                 msg.append("about this problem.")
4371
4372         msg2 = "".join("%s\n" % line for line in textwrap.wrap("".join(msg), 72))
4373         writemsg_level(msg1 + msg2, level=logging.ERROR, noiselevel=-1)
4374
4375 class PackageVirtualDbapi(portage.dbapi):
4376         """
4377         A dbapi-like interface class that represents the state of the installed
4378         package database as new packages are installed, replacing any packages
4379         that previously existed in the same slot. The main difference between
4380         this class and fakedbapi is that this one uses Package instances
4381         internally (passed in via cpv_inject() and cpv_remove() calls).
4382         """
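        # Typical usage, condensed from depgraph.__init__() below (an
        # illustrative sketch):
        #
        #     fakedb = PackageVirtualDbapi(vardb.settings)
        #     fakedb.cpv_inject(pkg)  # replaces any package in the same slot
        #     matches = fakedb.match_pkgs(atom)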
4383         def __init__(self, settings):
4384                 portage.dbapi.__init__(self)
4385                 self.settings = settings
4386                 self._match_cache = {}
4387                 self._cp_map = {}
4388                 self._cpv_map = {}
4389
4390         def clear(self):
4391                 """
4392                 Remove all packages.
4393                 """
4394                 if self._cpv_map:
4395                         self._clear_cache()
4396                         self._cp_map.clear()
4397                         self._cpv_map.clear()
4398
4399         def copy(self):
4400                 obj = PackageVirtualDbapi(self.settings)
4401                 obj._match_cache = self._match_cache.copy()
4402                 obj._cp_map = self._cp_map.copy()
4403                 for k, v in obj._cp_map.iteritems():
4404                         obj._cp_map[k] = v[:]
4405                 obj._cpv_map = self._cpv_map.copy()
4406                 return obj
4407
4408         def __iter__(self):
4409                 return self._cpv_map.itervalues()
4410
4411         def __contains__(self, item):
4412                 existing = self._cpv_map.get(item.cpv)
4413                 if existing is not None and \
4414                         existing == item:
4415                         return True
4416                 return False
4417
4418         def get(self, item, default=None):
4419                 cpv = getattr(item, "cpv", None)
4420                 if cpv is None:
4421                         if len(item) != 4:
4422                                 return default
4423                         type_name, root, cpv, operation = item
4424
4425                 existing = self._cpv_map.get(cpv)
4426                 if existing is not None and \
4427                         existing == item:
4428                         return existing
4429                 return default
4430
4431         def match_pkgs(self, atom):
4432                 return [self._cpv_map[cpv] for cpv in self.match(atom)]
4433
4434         def _clear_cache(self):
4435                 if self._categories is not None:
4436                         self._categories = None
4437                 if self._match_cache:
4438                         self._match_cache = {}
4439
4440         def match(self, origdep, use_cache=1):
4441                 result = self._match_cache.get(origdep)
4442                 if result is not None:
4443                         return result[:]
4444                 result = portage.dbapi.match(self, origdep, use_cache=use_cache)
4445                 self._match_cache[origdep] = result
4446                 return result[:]
4447
4448         def cpv_exists(self, cpv):
4449                 return cpv in self._cpv_map
4450
4451         def cp_list(self, mycp, use_cache=1):
4452                 cachelist = self._match_cache.get(mycp)
4453                 # cp_list() doesn't expand old-style virtuals
4454                 if cachelist and cachelist[0].startswith(mycp):
4455                         return cachelist[:]
4456                 cpv_list = self._cp_map.get(mycp)
4457                 if cpv_list is None:
4458                         cpv_list = []
4459                 else:
4460                         cpv_list = [pkg.cpv for pkg in cpv_list]
4461                 self._cpv_sort_ascending(cpv_list)
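                # Cache the result, except when it is an empty list for a
                # virtual/* package, since old-style virtual expansion (not
                # handled by cp_list()) could still produce matches.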
4462                 if not (not cpv_list and mycp.startswith("virtual/")):
4463                         self._match_cache[mycp] = cpv_list
4464                 return cpv_list[:]
4465
4466         def cp_all(self):
4467                 return list(self._cp_map)
4468
4469         def cpv_all(self):
4470                 return list(self._cpv_map)
4471
4472         def cpv_inject(self, pkg):
4473                 cp_list = self._cp_map.get(pkg.cp)
4474                 if cp_list is None:
4475                         cp_list = []
4476                         self._cp_map[pkg.cp] = cp_list
4477                 e_pkg = self._cpv_map.get(pkg.cpv)
4478                 if e_pkg is not None:
4479                         if e_pkg == pkg:
4480                                 return
4481                         self.cpv_remove(e_pkg)
4482                 for e_pkg in cp_list:
4483                         if e_pkg.slot_atom == pkg.slot_atom:
4484                                 if e_pkg == pkg:
4485                                         return
4486                                 self.cpv_remove(e_pkg)
4487                                 break
4488                 cp_list.append(pkg)
4489                 self._cpv_map[pkg.cpv] = pkg
4490                 self._clear_cache()
4491
4492         def cpv_remove(self, pkg):
4493                 old_pkg = self._cpv_map.get(pkg.cpv)
4494                 if old_pkg != pkg:
4495                         raise KeyError(pkg)
4496                 self._cp_map[pkg.cp].remove(pkg)
4497                 del self._cpv_map[pkg.cpv]
4498                 self._clear_cache()
4499
4500         def aux_get(self, cpv, wants):
4501                 metadata = self._cpv_map[cpv].metadata
4502                 return [metadata.get(x, "") for x in wants]
4503
4504         def aux_update(self, cpv, values):
4505                 self._cpv_map[cpv].metadata.update(values)
4506                 self._clear_cache()
4507
4508 class depgraph(object):
4509
4510         pkg_tree_map = RootConfig.pkg_tree_map
4511
4512         _dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
4513
4514         def __init__(self, settings, trees, myopts, myparams, spinner):
4515                 self.settings = settings
4516                 self.target_root = settings["ROOT"]
4517                 self.myopts = myopts
4518                 self.myparams = myparams
4519                 self.edebug = 0
4520                 if settings.get("PORTAGE_DEBUG", "") == "1":
4521                         self.edebug = 1
4522                 self.spinner = spinner
4523                 self._running_root = trees["/"]["root_config"]
4524                 self._opts_no_restart = Scheduler._opts_no_restart
4525                 self.pkgsettings = {}
4526                 # Maps slot atom to package for each Package added to the graph.
4527                 self._slot_pkg_map = {}
4528                 # Maps nodes to the reasons they were selected for reinstallation.
4529                 self._reinstall_nodes = {}
4530                 self.mydbapi = {}
4531                 self.trees = {}
4532                 self._trees_orig = trees
4533                 self.roots = {}
4534                 # Contains a filtered view of preferred packages that are selected
4535                 # from available repositories.
4536                 self._filtered_trees = {}
4537                 # Contains installed packages and new packages that have been added
4538                 # to the graph.
4539                 self._graph_trees = {}
4540                 # All Package instances
4541                 self._pkg_cache = {}
4542                 for myroot in trees:
4543                         self.trees[myroot] = {}
4544                         # Create a RootConfig instance that references
4545                         # the FakeVartree instead of the real one.
4546                         self.roots[myroot] = RootConfig(
4547                                 trees[myroot]["vartree"].settings,
4548                                 self.trees[myroot],
4549                                 trees[myroot]["root_config"].setconfig)
4550                         for tree in ("porttree", "bintree"):
4551                                 self.trees[myroot][tree] = trees[myroot][tree]
4552                         self.trees[myroot]["vartree"] = \
4553                                 FakeVartree(trees[myroot]["root_config"],
4554                                         pkg_cache=self._pkg_cache)
4555                         self.pkgsettings[myroot] = portage.config(
4556                                 clone=self.trees[myroot]["vartree"].settings)
4557                         self._slot_pkg_map[myroot] = {}
4558                         vardb = self.trees[myroot]["vartree"].dbapi
4559                         preload_installed_pkgs = "--nodeps" not in self.myopts and \
4560                                 "--buildpkgonly" not in self.myopts
4561                         # This fakedbapi instance will model the state that the vdb will
4562                         # have after new packages have been installed.
4563                         fakedb = PackageVirtualDbapi(vardb.settings)
4564                         if preload_installed_pkgs:
4565                                 for pkg in vardb:
4566                                         self.spinner.update()
4567                                         # This triggers metadata updates via FakeVartree.
4568                                         vardb.aux_get(pkg.cpv, [])
4569                                         fakedb.cpv_inject(pkg)
4570
4571                         # Now that the vardb state is cached in our FakeVartree,
4572                         # we won't be needing the real vartree cache for a while.
4573                         # To make some room on the heap, clear the vardbapi
4574                         # caches.
4575                         trees[myroot]["vartree"].dbapi._clear_cache()
4576                         gc.collect()
4577
4578                         self.mydbapi[myroot] = fakedb
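                        # Only the "dbapi" attribute appears to be needed from
                        # these tree objects, so a bare function object with
                        # fakedb attached serves as a lightweight stand-in for
                        # a real tree.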
4579                         def graph_tree():
4580                                 pass
4581                         graph_tree.dbapi = fakedb
4582                         self._graph_trees[myroot] = {}
4583                         self._filtered_trees[myroot] = {}
4584                         # Substitute the graph tree for the vartree in dep_check() since we
4585                         # want atom selections to be consistent with package selections
4586                         # that have already been made.
4587                         self._graph_trees[myroot]["porttree"]   = graph_tree
4588                         self._graph_trees[myroot]["vartree"]    = graph_tree
4589                         def filtered_tree():
4590                                 pass
4591                         filtered_tree.dbapi = self._dep_check_composite_db(self, myroot)
4592                         self._filtered_trees[myroot]["porttree"] = filtered_tree
4593
4594                         # Passing in graph_tree as the vartree here could lead to better
4595                         # atom selections in some cases by causing atoms for packages that
4596                         # have been added to the graph to be preferred over other choices.
4597                         # However, it can trigger atom selections that result in
4598                         # unresolvable direct circular dependencies. For example, this
4599                         # happens with gwydion-dylan which depends on either itself or
4600                         # gwydion-dylan-bin. In case gwydion-dylan is not yet installed,
4601                         # gwydion-dylan-bin needs to be selected in order to avoid
4602                         # an unresolvable direct circular dependency.
4603                         #
4604                         # To solve the problem described above, pass in "graph_db" so that
4605                         # packages that have been added to the graph are distinguishable
4606                         # from other available packages and installed packages. Also, pass
4607                         # the parent package into self._select_atoms() calls so that
4608                         # unresolvable direct circular dependencies can be detected and
4609                         # avoided when possible.
4610                         self._filtered_trees[myroot]["graph_db"] = graph_tree.dbapi
4611                         self._filtered_trees[myroot]["vartree"] = self.trees[myroot]["vartree"]
4612
4613                         dbs = []
4614                         portdb = self.trees[myroot]["porttree"].dbapi
4615                         bindb  = self.trees[myroot]["bintree"].dbapi
4616                         vardb  = self.trees[myroot]["vartree"].dbapi
4617                         #               (db, pkg_type, built, installed, db_keys)
4618                         if "--usepkgonly" not in self.myopts:
4619                                 db_keys = list(portdb._aux_cache_keys)
4620                                 dbs.append((portdb, "ebuild", False, False, db_keys))
4621                         if "--usepkg" in self.myopts:
4622                                 db_keys = list(bindb._aux_cache_keys)
4623                                 dbs.append((bindb,  "binary", True, False, db_keys))
4624                         db_keys = list(trees[myroot]["vartree"].dbapi._aux_cache_keys)
4625                         dbs.append((vardb, "installed", True, True, db_keys))
4626                         self._filtered_trees[myroot]["dbs"] = dbs
4627                         if "--usepkg" in self.myopts:
4628                                 self.trees[myroot]["bintree"].populate(
4629                                         "--getbinpkg" in self.myopts,
4630                                         "--getbinpkgonly" in self.myopts)
4631                 del trees
4632
4633                 self.digraph=portage.digraph()
4634                 # contains all sets added to the graph
4635                 self._sets = {}
4636                 # contains atoms given as arguments
4637                 self._sets["args"] = InternalPackageSet()
4638                 # contains all atoms from all sets added to the graph, including
4639                 # atoms given as arguments
4640                 self._set_atoms = InternalPackageSet()
4641                 self._atom_arg_map = {}
4642                 # contains all nodes pulled in by self._set_atoms
4643                 self._set_nodes = set()
4644                 # Contains only Blocker -> Uninstall edges
4645                 self._blocker_uninstalls = digraph()
4646                 # Contains only Package -> Blocker edges
4647                 self._blocker_parents = digraph()
4648                 # Contains only irrelevant Package -> Blocker edges
4649                 self._irrelevant_blockers = digraph()
4650                 # Contains only unsolvable Package -> Blocker edges
4651                 self._unsolvable_blockers = digraph()
4652                 # Contains all Blocker -> Blocked Package edges
4653                 self._blocked_pkgs = digraph()
4654                 # Contains world packages that have been protected from
4655                 # uninstallation but may not have been added to the graph
4656                 # if the graph is not complete yet.
4657                 self._blocked_world_pkgs = {}
4658                 self._slot_collision_info = {}
4659                 # Slot collision nodes are not allowed to block other packages since
4660                 # blocker validation is only able to account for one package per slot.
4661                 self._slot_collision_nodes = set()
4662                 self._parent_atoms = {}
4663                 self._slot_conflict_parent_atoms = set()
4664                 self._serialized_tasks_cache = None
4665                 self._scheduler_graph = None
4666                 self._displayed_list = None
4667                 self._pprovided_args = []
4668                 self._missing_args = []
4669                 self._masked_installed = set()
4670                 self._unsatisfied_deps_for_display = []
4671                 self._unsatisfied_blockers_for_display = None
4672                 self._circular_deps_for_display = None
4673                 self._dep_stack = []
4674                 self._unsatisfied_deps = []
4675                 self._initially_unsatisfied_deps = []
4676                 self._ignored_deps = []
4677                 self._required_set_names = set(["system", "world"])
4678                 self._select_atoms = self._select_atoms_highest_available
4679                 self._select_package = self._select_pkg_highest_available
4680                 self._highest_pkg_cache = {}
4681
4682         def _show_slot_collision_notice(self):
4683                 """Show an informational message advising the user to mask one of the
4684                 packages. In some cases it may be possible to resolve this
4685                 automatically, but support for backtracking (removal of nodes that have
4686                 already been selected) will be required in order to handle all possible
4687                 cases.
4688                 """
4689
4690                 if not self._slot_collision_info:
4691                         return
4692
4693                 self._show_merge_list()
4694
4695                 msg = []
4696                 msg.append("\n!!! Multiple package instances within a single " + \
4697                         "package slot have been pulled\n")
4698                 msg.append("!!! into the dependency graph, resulting" + \
4699                         " in a slot conflict:\n\n")
4700                 indent = "  "
4701                 # Max number of parents shown, to avoid flooding the display.
4702                 max_parents = 3
4703                 explanation_columns = 70
4704                 explanations = 0
4705                 for (slot_atom, root), slot_nodes \
4706                         in self._slot_collision_info.iteritems():
4707                         msg.append(str(slot_atom))
4708                         msg.append("\n\n")
4709
4710                         for node in slot_nodes:
4711                                 msg.append(indent)
4712                                 msg.append(str(node))
4713                                 parent_atoms = self._parent_atoms.get(node)
4714                                 if parent_atoms:
4715                                         pruned_list = set()
4716                                         # Prefer conflict atoms over others.
4717                                         for parent_atom in parent_atoms:
4718                                                 if len(pruned_list) >= max_parents:
4719                                                         break
4720                                                 if parent_atom in self._slot_conflict_parent_atoms:
4721                                                         pruned_list.add(parent_atom)
4722
4723                                         # If this package was pulled in by conflict atoms then
4724                                         # show those alone since those are the most interesting.
4725                                         if not pruned_list:
4726                                                 # When generating the pruned list, prefer instances
4727                                                 # of DependencyArg over instances of Package.
4728                                                 for parent_atom in parent_atoms:
4729                                                         if len(pruned_list) >= max_parents:
4730                                                                 break
4731                                                         parent, atom = parent_atom
4732                                                         if isinstance(parent, DependencyArg):
4733                                                                 pruned_list.add(parent_atom)
4734                                                 # Prefer Package instances that themselves have been
4735                                                 # pulled into collision slots.
4736                                                 for parent_atom in parent_atoms:
4737                                                         if len(pruned_list) >= max_parents:
4738                                                                 break
4739                                                         parent, atom = parent_atom
4740                                                         if isinstance(parent, Package) and \
4741                                                                 (parent.slot_atom, parent.root) \
4742                                                                 in self._slot_collision_info:
4743                                                                 pruned_list.add(parent_atom)
4744                                                 for parent_atom in parent_atoms:
4745                                                         if len(pruned_list) >= max_parents:
4746                                                                 break
4747                                                         pruned_list.add(parent_atom)
4748                                         omitted_parents = len(parent_atoms) - len(pruned_list)
4749                                         parent_atoms = pruned_list
4750                                         msg.append(" pulled in by\n")
4751                                         for parent_atom in parent_atoms:
4752                                                 parent, atom = parent_atom
4753                                                 msg.append(2*indent)
4754                                                 if isinstance(parent,
4755                                                         (PackageArg, AtomArg)):
4756                                                         # For PackageArg and AtomArg types, it's
4757                                                         # redundant to display the atom attribute.
4758                                                         msg.append(str(parent))
4759                                                 else:
4760                                                         # Display the specific atom from SetArg or
4761                                                         # Package types.
4762                                                         msg.append("%s required by %s" % (atom, parent))
4763                                                 msg.append("\n")
4764                                         if omitted_parents:
4765                                                 msg.append(2*indent)
4766                                                 msg.append("(and %d more)\n" % omitted_parents)
4767                                 else:
4768                                         msg.append(" (no parents)\n")
4769                                 msg.append("\n")
4770                         explanation = self._slot_conflict_explanation(slot_nodes)
4771                         if explanation:
4772                                 explanations += 1
4773                                 msg.append(indent + "Explanation:\n\n")
4774                                 for line in textwrap.wrap(explanation, explanation_columns):
4775                                         msg.append(2*indent + line + "\n")
4776                                 msg.append("\n")
4777                 msg.append("\n")
4778                 sys.stderr.write("".join(msg))
4779                 sys.stderr.flush()
4780
4781                 explanations_for_all = explanations == len(self._slot_collision_info)
4782
4783                 if explanations_for_all or "--quiet" in self.myopts:
4784                         return
4785
4786                 msg = []
4787                 msg.append("It may be possible to solve this problem ")
4788                 msg.append("by using package.mask to prevent one of ")
4789                 msg.append("those packages from being selected. ")
4790                 msg.append("However, it is also possible that conflicting ")
4791                 msg.append("dependencies exist such that they are impossible to ")
4792                 msg.append("satisfy simultaneously.  If such a conflict exists in ")
4793                 msg.append("the dependencies of two different packages, then those ")
4794                 msg.append("packages cannot be installed simultaneously.")
4795
4796                 from formatter import AbstractFormatter, DumbWriter
4797                 f = AbstractFormatter(DumbWriter(sys.stderr, maxcol=72))
4798                 for x in msg:
4799                         f.add_flowing_data(x)
4800                 f.end_paragraph(1)
4801
4802                 msg = []
4803                 msg.append("For more information, see MASKED PACKAGES ")
4804                 msg.append("section in the emerge man page or refer ")
4805                 msg.append("to the Gentoo Handbook.")
4806                 for x in msg:
4807                         f.add_flowing_data(x)
4808                 f.end_paragraph(1)
4809                 f.writer.flush()
4810
4811         def _slot_conflict_explanation(self, slot_nodes):
4812                 """
4813                 When a slot conflict occurs due to USE deps, there are a few
4814                 different cases to consider:
4815
4816                 1) New USE are correctly set but --newuse wasn't requested so an
4817                    installed package with incorrect USE happened to get pulled
4818                    into graph before the new one.
4819                    into the graph before the new one.
4820                 2) New USE are incorrectly set but an installed package has correct
4821                    USE so it got pulled into the graph, and a new instance also got
4822                    pulled in due to --newuse or an upgrade.
4823
4824                 3) Multiple USE deps exist that can't be satisfied simultaneously,
4825                    and multiple package instances got pulled into the same slot to
4826                    satisfy the conflicting deps.
4827
4828                 Currently, explanations and suggested courses of action are generated
4829                 for cases 1 and 2. Case 3 is too complex to give a useful suggestion.
4830                 """
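                     # Returns a suggestion string covering cases 1 and 2 above, or None when
                     # no suggestion can be made.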
4831
4832                 if len(slot_nodes) != 2:
4833                         # Suggestions are only implemented for
4834                         # conflicts between two packages.
4835                         return None
4836
4837                 all_conflict_atoms = self._slot_conflict_parent_atoms
4838                 matched_node = None
4839                 matched_atoms = None
4840                 unmatched_node = None
4841                 for node in slot_nodes:
4842                         parent_atoms = self._parent_atoms.get(node)
4843                         if not parent_atoms:
4844                                 # Normally, there are always parent atoms. If there are
4845                                 # none then something unexpected is happening and there's
4846                                 # currently no suggestion for this case.
4847                                 return None
4848                         conflict_atoms = all_conflict_atoms.intersection(parent_atoms)
4849                         for parent_atom in conflict_atoms:
4850                                 parent, atom = parent_atom
4851                                 if not atom.use:
4852                                         # Suggestions are currently only implemented for cases
4853                                         # in which all conflict atoms have USE deps.
4854                                         return None
4855                         if conflict_atoms:
4856                                 if matched_node is not None:
4857                                         # If conflict atoms match multiple nodes
4858                                         # then there's no suggestion.
4859                                         return None
4860                                 matched_node = node
4861                                 matched_atoms = conflict_atoms
4862                         else:
4863                                 if unmatched_node is not None:
4864                                         # Neither node is matched by conflict atoms, and
4865                                         # there is no suggestion for this case.
4866                                         return None
4867                                 unmatched_node = node
4868
4869                 if matched_node is None or unmatched_node is None:
4870                         # This shouldn't happen.
4871                         return None
4872
4873                 if unmatched_node.installed and not matched_node.installed and \
4874                         unmatched_node.cpv == matched_node.cpv:
4875                         # If the conflicting packages are the same version then
4876                         # --newuse should be all that's needed. If they are different
4877                         # versions then there's some other problem.
4878                         return "New USE are correctly set, but --newuse wasn't" + \
4879                                 " requested, so an installed package with incorrect USE " + \
4880                                 "happened to get pulled into the dependency graph. " + \
4881                                 "In order to solve " + \
4882                                 "this, either specify the --newuse option or explicitly " + \
4883                                 "reinstall '%s'." % matched_node.slot_atom
4884
4885                 if matched_node.installed and not unmatched_node.installed:
4886                         atoms = sorted(set(atom for parent, atom in matched_atoms))
4887                         explanation = ("New USE for '%s' are incorrectly set. " + \
4888                                 "In order to solve this, adjust USE to satisfy '%s'") % \
4889                                 (matched_node.slot_atom, atoms[0])
4890                         if len(atoms) > 1:
4891                                 for atom in atoms[1:-1]:
4892                                         explanation += ", '%s'" % (atom,)
4893                                 if len(atoms) > 2:
4894                                         explanation += ","
4895                                 explanation += " and '%s'" % (atoms[-1],)
4896                         explanation += "."
4897                         return explanation
4898
4899                 return None
4900
4901         def _process_slot_conflicts(self):
4902                 """
4903                 Process slot conflict data to identify specific atoms which
4904                 lead to conflict. These atoms only match a subset of the
4905                 packages that have been pulled into a given slot.
4906                 """
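                     # For each package in a conflicting slot, every parent atom collected from
                     # all members of that slot is tested against the package; atoms that fail
                     # to match are recorded in self._slot_conflict_parent_atoms.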
4907                 for (slot_atom, root), slot_nodes \
4908                         in self._slot_collision_info.iteritems():
4909
4910                         all_parent_atoms = set()
4911                         for pkg in slot_nodes:
4912                                 parent_atoms = self._parent_atoms.get(pkg)
4913                                 if not parent_atoms:
4914                                         continue
4915                                 all_parent_atoms.update(parent_atoms)
4916
4917                         for pkg in slot_nodes:
4918                                 parent_atoms = self._parent_atoms.get(pkg)
4919                                 if parent_atoms is None:
4920                                         parent_atoms = set()
4921                                         self._parent_atoms[pkg] = parent_atoms
4922                                 for parent_atom in all_parent_atoms:
4923                                         if parent_atom in parent_atoms:
4924                                                 continue
4925                                         # Use package set for matching since it will match via
4926                                         # PROVIDE when necessary, while match_from_list does not.
4927                                         parent, atom = parent_atom
4928                                         atom_set = InternalPackageSet(
4929                                                 initial_atoms=(atom,))
4930                                         if atom_set.findAtomForPackage(pkg):
4931                                                 parent_atoms.add(parent_atom)
4932                                         else:
4933                                                 self._slot_conflict_parent_atoms.add(parent_atom)
4934
4935         def _reinstall_for_flags(self, forced_flags,
4936                 orig_use, orig_iuse, cur_use, cur_iuse):
4937                 """Return a set of flags that trigger reinstallation, or None if there
4938                 are no such flags."""
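                     # Hypothetical example: with --newuse, orig_iuse={'ssl'}, orig_use={'ssl'},
                     # cur_iuse={'ssl', 'qt'}, cur_use={'ssl', 'qt'} and no forced flags yields
                     # set(['qt']), since 'qt' is both newly available and newly enabled.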
4939                 if "--newuse" in self.myopts:
4940                         flags = set(orig_iuse.symmetric_difference(
4941                                 cur_iuse).difference(forced_flags))
4942                         flags.update(orig_iuse.intersection(orig_use).symmetric_difference(
4943                                 cur_iuse.intersection(cur_use)))
4944                         if flags:
4945                                 return flags
4946                 elif "changed-use" == self.myopts.get("--reinstall"):
4947                         flags = orig_iuse.intersection(orig_use).symmetric_difference(
4948                                 cur_iuse.intersection(cur_use))
4949                         if flags:
4950                                 return flags
4951                 return None
4952
4953         def _create_graph(self, allow_unsatisfied=False):
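                     # Worklist loop: Package entries have their dependencies expanded via
                     # _add_pkg_deps(), while Dependency entries are resolved through _add_dep();
                     # returns 1 on success, or 0 as soon as either step fails.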
4954                 dep_stack = self._dep_stack
4955                 while dep_stack:
4956                         self.spinner.update()
4957                         dep = dep_stack.pop()
4958                         if isinstance(dep, Package):
4959                                 if not self._add_pkg_deps(dep,
4960                                         allow_unsatisfied=allow_unsatisfied):
4961                                         return 0
4962                                 continue
4963                         if not self._add_dep(dep, allow_unsatisfied=allow_unsatisfied):
4964                                 return 0
4965                 return 1
4966
4967         def _add_dep(self, dep, allow_unsatisfied=False):
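                     # Handle a single Dependency: blocker atoms are generally recorded in
                     # _blocker_parents for later validation, while normal atoms are resolved to
                     # a package via self._select_package and added to the graph via _add_pkg.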
4968                 debug = "--debug" in self.myopts
4969                 buildpkgonly = "--buildpkgonly" in self.myopts
4970                 nodeps = "--nodeps" in self.myopts
4971                 empty = "empty" in self.myparams
4972                 deep = "deep" in self.myparams
4973                 update = "--update" in self.myopts and dep.depth <= 1
4974                 if dep.blocker:
4975                         if not buildpkgonly and \
4976                                 not nodeps and \
4977                                 dep.parent not in self._slot_collision_nodes:
4978                                 if dep.parent.onlydeps:
4979                                         # It's safe to ignore blockers if the
4980                                         # parent is an --onlydeps node.
4981                                         return 1
4982                                 # The blocker applies to the root where
4983                                 # the parent is or will be installed.
4984                                 blocker = Blocker(atom=dep.atom,
4985                                         eapi=dep.parent.metadata["EAPI"],
4986                                         root=dep.parent.root)
4987                                 self._blocker_parents.add(blocker, dep.parent)
4988                         return 1
4989                 dep_pkg, existing_node = self._select_package(dep.root, dep.atom,
4990                         onlydeps=dep.onlydeps)
4991                 if not dep_pkg:
4992                         if dep.priority.optional:
4993                         # This could be an unnecessary build-time dep
4994                                 # pulled in by --with-bdeps=y.
4995                                 return 1
4996                         if allow_unsatisfied:
4997                                 self._unsatisfied_deps.append(dep)
4998                                 return 1
4999                         self._unsatisfied_deps_for_display.append(
5000                                 ((dep.root, dep.atom), {"myparent":dep.parent}))
5001                         return 0
5002                 # In some cases, dep_check will return deps that shouldn't
5003                 # be processed any further, so they are identified and
5004                 # discarded here. Try to discard as few as possible since
5005                 # discarded dependencies reduce the amount of information
5006                 # available for optimization of merge order.
5007                 if dep.priority.satisfied and \
5008                         not dep_pkg.installed and \
5009                         not (existing_node or empty or deep or update):
5010                         myarg = None
5011                         if dep.root == self.target_root:
5012                                 try:
5013                                         myarg = self._iter_atoms_for_pkg(dep_pkg).next()
5014                                 except StopIteration:
5015                                         pass
5016                                 except portage.exception.InvalidDependString:
5017                                         if not dep_pkg.installed:
5018                                                 # This shouldn't happen since the package
5019                                                 # should have been masked.
5020                                                 raise
5021                         if not myarg:
5022                                 self._ignored_deps.append(dep)
5023                                 return 1
5024
5025                 if not self._add_pkg(dep_pkg, dep):
5026                         return 0
5027                 return 1
5028
5029         def _add_pkg(self, pkg, dep):
5030                 myparent = None
5031                 priority = None
5032                 depth = 0
5033                 if dep is None:
5034                         dep = Dependency()
5035                 else:
5036                         myparent = dep.parent
5037                         priority = dep.priority
5038                         depth = dep.depth
5039                 if priority is None:
5040                         priority = DepPriority()
5041                 """
5042                 Fills the digraph with nodes comprised of packages to merge.
5043                 mybigkey is the package spec of the package to merge.
5044                 myparent is the package depending on mybigkey (or None)
5045                 addme = Should we add this package to the digraph or are we just looking at its deps?
5046                         Think --onlydeps, we need to ignore packages in that case.
5047                 #stuff to add:
5048                 #SLOT-aware emerge
5049                 #IUSE-aware emerge -> USE DEP aware depgraph
5050                 #"no downgrade" emerge
5051                 """
5052                 # Ensure that the dependencies of the same package
5053                 # are never processed more than once.
5054                 previously_added = pkg in self.digraph
5055
5056                 # select the correct /var database that we'll be checking against
5057                 vardbapi = self.trees[pkg.root]["vartree"].dbapi
5058                 pkgsettings = self.pkgsettings[pkg.root]
5059
5060                 arg_atoms = None
5061                 if True:
5062                         try:
5063                                 arg_atoms = list(self._iter_atoms_for_pkg(pkg))
5064                         except portage.exception.InvalidDependString, e:
5065                                 if not pkg.installed:
5066                                         show_invalid_depstring_notice(
5067                                                 pkg, pkg.metadata["PROVIDE"], str(e))
5068                                         return 0
5069                                 del e
5070
5071                 if not pkg.onlydeps:
5072                         if not pkg.installed and \
5073                                 "empty" not in self.myparams and \
5074                                 vardbapi.match(pkg.slot_atom):
5075                                 # Increase the priority of dependencies on packages that
5076                                 # are being rebuilt. This optimizes merge order so that
5077                                 # dependencies are rebuilt/updated as soon as possible,
5078                                 # which is needed especially when emerge is called by
5079                                 # revdep-rebuild since dependencies may be affected by ABI
5080                                 # breakage that has rendered them useless. Don't adjust
5081                                 # priority here when in "empty" mode since all packages
5082                                 # are being merged in that case.
5083                                 priority.rebuild = True
5084
5085                         existing_node = self._slot_pkg_map[pkg.root].get(pkg.slot_atom)
5086                         slot_collision = False
5087                         if existing_node:
5088                                 existing_node_matches = pkg.cpv == existing_node.cpv
5089                                 if existing_node_matches and \
5090                                         pkg != existing_node and \
5091                                         dep.atom is not None:
5092                                         # Use package set for matching since it will match via
5093                                         # PROVIDE when necessary, while match_from_list does not.
5094                                         atom_set = InternalPackageSet(initial_atoms=[dep.atom])
5095                                         if not atom_set.findAtomForPackage(existing_node):
5096                                                 existing_node_matches = False
5097                                 if existing_node_matches:
5098                                         # The existing node can be reused.
5099                                         if arg_atoms:
5100                                                 for parent_atom in arg_atoms:
5101                                                         parent, atom = parent_atom
5102                                                         self.digraph.add(existing_node, parent,
5103                                                                 priority=priority)
5104                                                         self._add_parent_atom(existing_node, parent_atom)
5105                                         # If a direct circular dependency is not an unsatisfied
5106                                         # buildtime dependency then drop it here since otherwise
5107                                         # it can skew the merge order calculation in an unwanted
5108                                         # way.
5109                                         if existing_node != myparent or \
5110                                                 (priority.buildtime and not priority.satisfied):
5111                                                 self.digraph.addnode(existing_node, myparent,
5112                                                         priority=priority)
5113                                                 if dep.atom is not None and dep.parent is not None:
5114                                                         self._add_parent_atom(existing_node,
5115                                                                 (dep.parent, dep.atom))
5116                                         return 1
5117                                 else:
5118
5119                                         # A slot collision has occurred.  Sometimes this coincides
5120                                         # with unresolvable blockers, so the slot collision will be
5121                                         # shown later if there are no unresolvable blockers.
5122                                         self._add_slot_conflict(pkg)
5123                                         slot_collision = True
5124
5125                         if slot_collision:
5126                                 # Now add this node to the graph so that self.display()
5127                                 # can show use flags and --tree output.  This node is
5128                                 # only being partially added to the graph.  It must not be
5129                                 # allowed to interfere with the other nodes that have been
5130                                 # added.  Do not overwrite data for existing nodes in
5131                                 # self.mydbapi since that data will be used for blocker
5132                                 # validation.
5133                                 # Even though the graph is now invalid, continue to process
5134                                 # dependencies so that things like --fetchonly can still
5135                                 # function despite collisions.
5136                                 pass
5137                         elif not previously_added:
5138                                 self._slot_pkg_map[pkg.root][pkg.slot_atom] = pkg
5139                                 self.mydbapi[pkg.root].cpv_inject(pkg)
5140                                 self._filtered_trees[pkg.root]["porttree"].dbapi._clear_cache()
5141
5142                         if not pkg.installed:
5143                                 # Allow this package to satisfy old-style virtuals in case it
5144                                 # doesn't already. Any pre-existing providers will be preferred
5145                                 # over this one.
5146                                 try:
5147                                         pkgsettings.setinst(pkg.cpv, pkg.metadata)
5148                                         # For consistency, also update the global virtuals.
5149                                         settings = self.roots[pkg.root].settings
5150                                         settings.unlock()
5151                                         settings.setinst(pkg.cpv, pkg.metadata)
5152                                         settings.lock()
5153                                 except portage.exception.InvalidDependString, e:
5154                                         show_invalid_depstring_notice(
5155                                                 pkg, pkg.metadata["PROVIDE"], str(e))
5156                                         del e
5157                                         return 0
5158
5159                 if arg_atoms:
5160                         self._set_nodes.add(pkg)
5161
5162                 # Do this even when addme is False (--onlydeps) so that the
5163                 # parent/child relationship is always known in case
5164                 # self._show_slot_collision_notice() needs to be called later.
5165                 self.digraph.add(pkg, myparent, priority=priority)
5166                 if dep.atom is not None and dep.parent is not None:
5167                         self._add_parent_atom(pkg, (dep.parent, dep.atom))
5168
5169                 if arg_atoms:
5170                         for parent_atom in arg_atoms:
5171                                 parent, atom = parent_atom
5172                                 self.digraph.add(pkg, parent, priority=priority)
5173                                 self._add_parent_atom(pkg, parent_atom)
5174
5175                 """ This section determines whether we go deeper into dependencies or not.
5176                     We want to go deeper on a few occasions:
5177                     Installing package A, we need to make sure package A's deps are met.
5178                     emerge --deep <pkgspec>; we need to recursively check dependencies of pkgspec
5179                     If we are in --nodeps (no recursion) mode, we obviously only check 1 level of dependencies.
5180                 """
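                     # Without "recurse" dependencies are not followed at all; installed packages
                     # are only descended into when "deep" is set, otherwise their dependencies
                     # are parked in self._ignored_deps instead of the active dependency stack.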
5181                 dep_stack = self._dep_stack
5182                 if "recurse" not in self.myparams:
5183                         return 1
5184                 elif pkg.installed and \
5185                         "deep" not in self.myparams:
5186                         dep_stack = self._ignored_deps
5187
5188                 self.spinner.update()
5189
5190                 if arg_atoms:
5191                         depth = 0
5192                 pkg.depth = depth
5193                 if not previously_added:
5194                         dep_stack.append(pkg)
5195                 return 1
5196
5197         def _add_parent_atom(self, pkg, parent_atom):
5198                 parent_atoms = self._parent_atoms.get(pkg)
5199                 if parent_atoms is None:
5200                         parent_atoms = set()
5201                         self._parent_atoms[pkg] = parent_atoms
5202                 parent_atoms.add(parent_atom)
5203
5204         def _add_slot_conflict(self, pkg):
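                     # Record pkg as a slot-conflict node; the first conflict for a given slot
                     # also records the package previously selected for that slot.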
5205                 self._slot_collision_nodes.add(pkg)
5206                 slot_key = (pkg.slot_atom, pkg.root)
5207                 slot_nodes = self._slot_collision_info.get(slot_key)
5208                 if slot_nodes is None:
5209                         slot_nodes = set()
5210                         slot_nodes.add(self._slot_pkg_map[pkg.root][pkg.slot_atom])
5211                         self._slot_collision_info[slot_key] = slot_nodes
5212                 slot_nodes.add(pkg)
5213
5214         def _add_pkg_deps(self, pkg, allow_unsatisfied=False):
5215
5216                 mytype = pkg.type_name
5217                 myroot = pkg.root
5218                 mykey = pkg.cpv
5219                 metadata = pkg.metadata
5220                 myuse = pkg.use.enabled
5221                 jbigkey = pkg
5222                 depth = pkg.depth + 1
5223                 removal_action = "remove" in self.myparams
5224
5225                 edepend={}
5226                 depkeys = ["DEPEND","RDEPEND","PDEPEND"]
5227                 for k in depkeys:
5228                         edepend[k] = metadata[k]
5229
5230                 if not pkg.built and \
5231                         "--buildpkgonly" in self.myopts and \
5232                         "deep" not in self.myparams and \
5233                         "empty" not in self.myparams:
5234                         edepend["RDEPEND"] = ""
5235                         edepend["PDEPEND"] = ""
5236                 bdeps_optional = False
5237
5238                 if pkg.built and not removal_action:
5239                         if self.myopts.get("--with-bdeps", "n") == "y":
5240                                 # Pull in build time deps as requested, but mark them as
5241                                 # "optional" since they are not strictly required. This allows
5242                                 # more freedom in the merge order calculation for solving
5243                                 # circular dependencies. Don't convert to PDEPEND since that
5244                                 # could make --with-bdeps=y less effective if it is used to
5245                                 # adjust merge order to prevent built_with_use() calls from
5246                                 # failing.
5247                                 bdeps_optional = True
5248                         else:
5249                                 # built packages do not have build time dependencies.
5250                                 edepend["DEPEND"] = ""
5251
5252                 if removal_action and self.myopts.get("--with-bdeps", "y") == "n":
5253                         edepend["DEPEND"] = ""
5254
5255                 deps = (
5256                         ("/", edepend["DEPEND"],
5257                                 self._priority(buildtime=(not bdeps_optional),
5258                                 optional=bdeps_optional)),
5259                         (myroot, edepend["RDEPEND"], self._priority(runtime=True)),
5260                         (myroot, edepend["PDEPEND"], self._priority(runtime_post=True))
5261                 )
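                     # DEPEND is resolved against "/" (the build root) while RDEPEND and PDEPEND
                     # are resolved against the package's own root; the priorities mark them as
                     # buildtime (or optional when bdeps_optional is set), runtime and runtime_post.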
5262
5263                 debug = "--debug" in self.myopts
5264                 strict = mytype != "installed"
5265                 try:
5266                         for dep_root, dep_string, dep_priority in deps:
5267                                 if not dep_string:
5268                                         continue
5269                                 if debug:
5270                                         print
5271                                         print "Parent:   ", jbigkey
5272                                         print "Depstring:", dep_string
5273                                         print "Priority:", dep_priority
5274                                 vardb = self.roots[dep_root].trees["vartree"].dbapi
5275                                 try:
5276                                         selected_atoms = self._select_atoms(dep_root,
5277                                                 dep_string, myuse=myuse, parent=pkg, strict=strict,
5278                                                 priority=dep_priority)
5279                                 except portage.exception.InvalidDependString, e:
5280                                         show_invalid_depstring_notice(jbigkey, dep_string, str(e))
5281                                         return 0
5282                                 if debug:
5283                                         print "Candidates:", selected_atoms
5284
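                                     # Atom() may raise InvalidAtom below; for installed packages the
                                     # bad atom is reported and skipped rather than aborting the whole
                                     # graph calculation (see the InvalidAtom handler at the end of
                                     # this loop).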
5285                                 for atom in selected_atoms:
5286                                         try:
5287
5288                                                 atom = portage.dep.Atom(atom)
5289
5290                                                 mypriority = dep_priority.copy()
5291                                                 if not atom.blocker and vardb.match(atom):
5292                                                         mypriority.satisfied = True
5293
5294                                                 if not self._add_dep(Dependency(atom=atom,
5295                                                         blocker=atom.blocker, depth=depth, parent=pkg,
5296                                                         priority=mypriority, root=dep_root),
5297                                                         allow_unsatisfied=allow_unsatisfied):
5298                                                         return 0
5299
5300                                         except portage.exception.InvalidAtom, e:
5301                                                 show_invalid_depstring_notice(
5302                                                         pkg, dep_string, str(e))
5303                                                 del e
5304                                                 if not pkg.installed:
5305                                                         return 0
5306
5307                                 if debug:
5308                                         print "Exiting...", jbigkey
5309                 except portage.exception.AmbiguousPackageName, e:
5310                         pkgs = e.args[0]
5311                         portage.writemsg("\n\n!!! An atom in the dependencies " + \
5312                                 "is not fully-qualified. Multiple matches:\n\n", noiselevel=-1)
5313                         for cpv in pkgs:
5314                                 portage.writemsg("    %s\n" % cpv, noiselevel=-1)
5315                         portage.writemsg("\n", noiselevel=-1)
5316                         if mytype == "binary":
5317                                 portage.writemsg(
5318                                         "!!! This binary package cannot be installed: '%s'\n" % \
5319                                         mykey, noiselevel=-1)
5320                         elif mytype == "ebuild":
5321                                 portdb = self.roots[myroot].trees["porttree"].dbapi
5322                                 myebuild, mylocation = portdb.findname2(mykey)
5323                                 portage.writemsg("!!! This ebuild cannot be installed: " + \
5324                                         "'%s'\n" % myebuild, noiselevel=-1)
5325                         portage.writemsg("!!! Please notify the package maintainer " + \
5326                                 "that atoms must be fully-qualified.\n", noiselevel=-1)
5327                         return 0
5328                 return 1
5329
5330         def _priority(self, **kwargs):
5331                 if "remove" in self.myparams:
5332                         priority_constructor = UnmergeDepPriority
5333                 else:
5334                         priority_constructor = DepPriority
5335                 return priority_constructor(**kwargs)
5336
5337         def _dep_expand(self, root_config, atom_without_category):
5338                 """
5339                 @param root_config: a root config instance
5340                 @type root_config: RootConfig
5341                 @param atom_without_category: an atom without a category component
5342                 @type atom_without_category: String
5343                 @rtype: list
5344                 @returns: a list of atoms containing categories (possibly empty)
5345                 """
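                     # Hypothetical example: given "pkgconfig", this might return
                     # ["dev-util/pkgconfig", "virtual/pkgconfig"] if both categories contain a
                     # matching package in one of the configured databases.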
5346                 null_cp = portage.dep_getkey(insert_category_into_atom(
5347                         atom_without_category, "null"))
5348                 cat, atom_pn = portage.catsplit(null_cp)
5349
5350                 dbs = self._filtered_trees[root_config.root]["dbs"]
5351                 categories = set()
5352                 for db, pkg_type, built, installed, db_keys in dbs:
5353                         for cat in db.categories:
5354                                 if db.cp_list("%s/%s" % (cat, atom_pn)):
5355                                         categories.add(cat)
5356
5357                 deps = []
5358                 for cat in categories:
5359                         deps.append(insert_category_into_atom(
5360                                 atom_without_category, cat))
5361                 return deps
5362
5363         def _have_new_virt(self, root, atom_cp):
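                     # True if any configured package database lists atom_cp directly, i.e. a
                     # new-style package or virtual exists for that category/package name.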
5364                 ret = False
5365                 for db, pkg_type, built, installed, db_keys in \
5366                         self._filtered_trees[root]["dbs"]:
5367                         if db.cp_list(atom_cp):
5368                                 ret = True
5369                                 break
5370                 return ret
5371
5372         def _iter_atoms_for_pkg(self, pkg):
5373                 # TODO: add multiple $ROOT support
5374                 if pkg.root != self.target_root:
5375                         return
5376                 atom_arg_map = self._atom_arg_map
5377                 root_config = self.roots[pkg.root]
5378                 for atom in self._set_atoms.iterAtomsForPackage(pkg):
5379                         atom_cp = portage.dep_getkey(atom)
5380                         if atom_cp != pkg.cp and \
5381                                 self._have_new_virt(pkg.root, atom_cp):
5382                                 continue
5383                         visible_pkgs = root_config.visible_pkgs.match_pkgs(atom)
5384                         visible_pkgs.reverse() # descending order
5385                         higher_slot = None
5386                         for visible_pkg in visible_pkgs:
5387                                 if visible_pkg.cp != atom_cp:
5388                                         continue
5389                                 if pkg >= visible_pkg:
5390                                         # This is descending order, and we're not
5391                                         # interested in any versions <= pkg given.
5392                                         break
5393                                 if pkg.slot_atom != visible_pkg.slot_atom:
5394                                         higher_slot = visible_pkg
5395                                         break
5396                         if higher_slot is not None:
5397                                 continue
5398                         for arg in atom_arg_map[(atom, pkg.root)]:
5399                                 if isinstance(arg, PackageArg) and \
5400                                         arg.package != pkg:
5401                                         continue
5402                                 yield arg, atom
5403
5404         def select_files(self, myfiles):
5405                 """Given a list of .tbz2s, .ebuilds, sets, and deps, create the
5406                 appropriate depgraph and return a favorite list."""
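                     # Arguments may be .tbz2 paths, .ebuild paths, absolute file paths (resolved
                     # to their owning packages), set names prefixed with SETPREFIX, or package
                     # atoms; each form is converted to a corresponding *Arg instance below.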
5407                 debug = "--debug" in self.myopts
5408                 root_config = self.roots[self.target_root]
5409                 sets = root_config.sets
5410                 getSetAtoms = root_config.setconfig.getSetAtoms
5411                 myfavorites=[]
5412                 myroot = self.target_root
5413                 dbs = self._filtered_trees[myroot]["dbs"]
5414                 vardb = self.trees[myroot]["vartree"].dbapi
5415                 real_vardb = self._trees_orig[myroot]["vartree"].dbapi
5416                 portdb = self.trees[myroot]["porttree"].dbapi
5417                 bindb = self.trees[myroot]["bintree"].dbapi
5418                 pkgsettings = self.pkgsettings[myroot]
5419                 args = []
5420                 onlydeps = "--onlydeps" in self.myopts
5421                 lookup_owners = []
5422                 for x in myfiles:
5423                         ext = os.path.splitext(x)[1]
5424                         if ext==".tbz2":
5425                                 if not os.path.exists(x):
5426                                         if os.path.exists(
5427                                                 os.path.join(pkgsettings["PKGDIR"], "All", x)):
5428                                                 x = os.path.join(pkgsettings["PKGDIR"], "All", x)
5429                                         elif os.path.exists(
5430                                                 os.path.join(pkgsettings["PKGDIR"], x)):
5431                                                 x = os.path.join(pkgsettings["PKGDIR"], x)
5432                                         else:
5433                                                 print "\n\n!!! Binary package '"+str(x)+"' does not exist."
5434                                                 print "!!! Please ensure the tbz2 exists as specified.\n"
5435                                                 return 0, myfavorites
5436                                 mytbz2=portage.xpak.tbz2(x)
5437                                 mykey=mytbz2.getelements("CATEGORY")[0]+"/"+os.path.splitext(os.path.basename(x))[0]
5438                                 if os.path.realpath(x) != \
5439                                         os.path.realpath(self.trees[myroot]["bintree"].getname(mykey)):
5440                                         print colorize("BAD", "\n*** You need to adjust PKGDIR to emerge this package.\n")
5441                                         return 0, myfavorites
5442                                 db_keys = list(bindb._aux_cache_keys)
5443                                 metadata = izip(db_keys, bindb.aux_get(mykey, db_keys))
5444                                 pkg = Package(type_name="binary", root_config=root_config,
5445                                         cpv=mykey, built=True, metadata=metadata,
5446                                         onlydeps=onlydeps)
5447                                 self._pkg_cache[pkg] = pkg
5448                                 args.append(PackageArg(arg=x, package=pkg,
5449                                         root_config=root_config))
5450                         elif ext==".ebuild":
5451                                 ebuild_path = portage.util.normalize_path(os.path.abspath(x))
5452                                 pkgdir = os.path.dirname(ebuild_path)
5453                                 tree_root = os.path.dirname(os.path.dirname(pkgdir))
5454                                 cp = pkgdir[len(tree_root)+1:]
5455                                 e = portage.exception.PackageNotFound(
5456                                         ("%s is not in a valid portage tree " + \
5457                                         "hierarchy or does not exist") % x)
5458                                 if not portage.isvalidatom(cp):
5459                                         raise e
5460                                 cat = portage.catsplit(cp)[0]
5461                                 mykey = cat + "/" + os.path.basename(ebuild_path[:-7])
5462                                 if not portage.isvalidatom("="+mykey):
5463                                         raise e
5464                                 ebuild_path = portdb.findname(mykey)
5465                                 if ebuild_path:
5466                                         if ebuild_path != os.path.join(os.path.realpath(tree_root),
5467                                                 cp, os.path.basename(ebuild_path)):
5468                                                 print colorize("BAD", "\n*** You need to adjust PORTDIR or PORTDIR_OVERLAY to emerge this package.\n")
5469                                                 return 0, myfavorites
5470                                         if mykey not in portdb.xmatch(
5471                                                 "match-visible", portage.dep_getkey(mykey)):
5472                                                 print colorize("BAD", "\n*** You are emerging a masked package. It is MUCH better to use")
5473                                                 print colorize("BAD", "*** /etc/portage/package.* to accomplish this. See portage(5) man")
5474                                                 print colorize("BAD", "*** page for details.")
5475                                                 countdown(int(self.settings["EMERGE_WARNING_DELAY"]),
5476                                                         "Continuing...")
5477                                 else:
5478                                         raise portage.exception.PackageNotFound(
5479                                                 "%s is not in a valid portage tree hierarchy or does not exist" % x)
5480                                 db_keys = list(portdb._aux_cache_keys)
5481                                 metadata = izip(db_keys, portdb.aux_get(mykey, db_keys))
5482                                 pkg = Package(type_name="ebuild", root_config=root_config,
5483                                         cpv=mykey, metadata=metadata, onlydeps=onlydeps)
5484                                 pkgsettings.setcpv(pkg)
5485                                 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
5486                                 pkg.metadata['CHOST'] = pkgsettings.get('CHOST', '')
5487                                 self._pkg_cache[pkg] = pkg
5488                                 args.append(PackageArg(arg=x, package=pkg,
5489                                         root_config=root_config))
5490                         elif x.startswith(os.path.sep):
5491                                 if not x.startswith(myroot):
5492                                         portage.writemsg(("\n\n!!! '%s' does not start with" + \
5493                                                 " $ROOT.\n") % x, noiselevel=-1)
5494                                         return 0, []
5495                                 # Queue these up since it's most efficient to handle
5496                                 # multiple files in a single iter_owners() call.
5497                                 lookup_owners.append(x)
5498                         else:
5499                                 if x in ("system", "world"):
5500                                         x = SETPREFIX + x
5501                                 if x.startswith(SETPREFIX):
5502                                         s = x[len(SETPREFIX):]
5503                                         if s not in sets:
5504                                                 raise portage.exception.PackageSetNotFound(s)
5505                                         if s in self._sets:
5506                                                 continue
5507                                         # Recursively expand sets so that containment tests in
5508                                         # self._get_parent_sets() properly match atoms in nested
5509                                         # sets (like if world contains system).
5510                                         expanded_set = InternalPackageSet(
5511                                                 initial_atoms=getSetAtoms(s))
5512                                         self._sets[s] = expanded_set
5513                                         args.append(SetArg(arg=x, set=expanded_set,
5514                                                 root_config=root_config))
5515                                         continue
5516                                 if not is_valid_package_atom(x):
5517                                         portage.writemsg("\n\n!!! '%s' is not a valid package atom.\n" % x,
5518                                                 noiselevel=-1)
5519                                         portage.writemsg("!!! Please check ebuild(5) for full details.\n")
5520                                         portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
5521                                         return (0,[])
5522                                 # Don't expand categories or old-style virtuals here unless
5523                                 # necessary. Expansion of old-style virtuals here causes at
5524                                 # least the following problems:
5525                                 #   1) It's more difficult to determine which set(s) an atom
5526                                 #      came from, if any.
5527                                 #   2) It takes away freedom from the resolver to choose other
5528                                 #      possible expansions when necessary.
5529                                 if "/" in x:
5530                                         args.append(AtomArg(arg=x, atom=x,
5531                                                 root_config=root_config))
5532                                         continue
5533                                 expanded_atoms = self._dep_expand(root_config, x)
5534                                 installed_cp_set = set()
5535                                 for atom in expanded_atoms:
5536                                         atom_cp = portage.dep_getkey(atom)
5537                                         if vardb.cp_list(atom_cp):
5538                                                 installed_cp_set.add(atom_cp)
5539                                 if len(expanded_atoms) > 1 and len(installed_cp_set) == 1:
5540                                         installed_cp = iter(installed_cp_set).next()
5541                                         expanded_atoms = [atom for atom in expanded_atoms \
5542                                                 if portage.dep_getkey(atom) == installed_cp]
5543
5544                                 if len(expanded_atoms) > 1:
5545                                         print
5546                                         print
5547                                         ambiguous_package_name(x, expanded_atoms, root_config,
5548                                                 self.spinner, self.myopts)
5549                                         return False, myfavorites
5550                                 if expanded_atoms:
5551                                         atom = expanded_atoms[0]
5552                                 else:
5553                                         null_atom = insert_category_into_atom(x, "null")
5554                                         null_cp = portage.dep_getkey(null_atom)
5555                                         cat, atom_pn = portage.catsplit(null_cp)
5556                                         virts_p = root_config.settings.get_virts_p().get(atom_pn)
5557                                         if virts_p:
5558                                                 # Allow the depgraph to choose which virtual.
5559                                                 atom = insert_category_into_atom(x, "virtual")
5560                                         else:
5561                                                 atom = insert_category_into_atom(x, "null")
5562
5563                                 args.append(AtomArg(arg=x, atom=atom,
5564                                         root_config=root_config))
5565
5566                 if lookup_owners:
5567                         relative_paths = []
5568                         search_for_multiple = False
5569                         if len(lookup_owners) > 1:
5570                                 search_for_multiple = True
5571
5572                         for x in lookup_owners:
5573                                 if not search_for_multiple and os.path.isdir(x):
5574                                         search_for_multiple = True
5575                                 relative_paths.append(x[len(myroot):])
5576
5577                         owners = set()
5578                         for pkg, relative_path in \
5579                                 real_vardb._owners.iter_owners(relative_paths):
5580                                 owners.add(pkg.mycpv)
5581                                 if not search_for_multiple:
5582                                         break
5583
5584                         if not owners:
5585                                 portage.writemsg(("\n\n!!! '%s' is not claimed " + \
5586                                         "by any package.\n") % lookup_owners[0], noiselevel=-1)
5587                                 return 0, []
5588
5589                         for cpv in owners:
5590                                 slot = vardb.aux_get(cpv, ["SLOT"])[0]
5591                                 if not slot:
5592                                         # portage now masks packages with missing slot, but it's
5593                                         # possible that one was installed by an older version
5594                                         atom = portage.cpv_getkey(cpv)
5595                                 else:
5596                                         atom = "%s:%s" % (portage.cpv_getkey(cpv), slot)
5597                                 args.append(AtomArg(arg=atom, atom=atom,
5598                                         root_config=root_config))
5599
5600                 if "--update" in self.myopts:
5601                         # In some cases, the greedy slots behavior can pull in a slot that
5602                         # the user would want to uninstall due to it being blocked by a
5603                         # newer version in a different slot. Therefore, it's necessary to
5604                         # detect and discard any that should be uninstalled. Each time
5605                         # that arguments are updated, package selections are repeated in
5606                         # order to ensure consistency with the current arguments:
5607                         #
5608                         #  1) Initialize args
5609                         #  2) Select packages and generate initial greedy atoms
5610                         #  3) Update args with greedy atoms
5611                         #  4) Select packages and generate greedy atoms again, while
5612                         #     accounting for any blockers between selected packages
5613                         #  5) Update args with revised greedy atoms
5614
5615                         self._set_args(args)
5616                         greedy_args = []
5617                         for arg in args:
5618                                 greedy_args.append(arg)
5619                                 if not isinstance(arg, AtomArg):
5620                                         continue
5621                                 for atom in self._greedy_slots(arg.root_config, arg.atom):
5622                                         greedy_args.append(
5623                                                 AtomArg(arg=arg.arg, atom=atom,
5624                                                         root_config=arg.root_config))
5625
5626                         self._set_args(greedy_args)
5627                         del greedy_args
5628
5629                         # Revise greedy atoms, accounting for any blockers
5630                         # between selected packages.
5631                         revised_greedy_args = []
5632                         for arg in args:
5633                                 revised_greedy_args.append(arg)
5634                                 if not isinstance(arg, AtomArg):
5635                                         continue
5636                                 for atom in self._greedy_slots(arg.root_config, arg.atom,
5637                                         blocker_lookahead=True):
5638                                         revised_greedy_args.append(
5639                                                 AtomArg(arg=arg.arg, atom=atom,
5640                                                         root_config=arg.root_config))
5641                         args = revised_greedy_args
5642                         del revised_greedy_args
5643
5644                 self._set_args(args)
5645
5646                 myfavorites = set(myfavorites)
5647                 for arg in args:
5648                         if isinstance(arg, (AtomArg, PackageArg)):
5649                                 myfavorites.add(arg.atom)
5650                         elif isinstance(arg, SetArg):
5651                                 myfavorites.add(arg.arg)
5652                 myfavorites = list(myfavorites)
5653
5654                 pprovideddict = pkgsettings.pprovideddict
5655                 if debug:
5656                         portage.writemsg("\n", noiselevel=-1)
5657                 # Order needs to be preserved since a feature of --nodeps
5658                 # is to allow the user to force a specific merge order.
5659                 args.reverse()
5660                 while args:
5661                         arg = args.pop()
5662                         for atom in arg.set:
5663                                 self.spinner.update()
5664                                 dep = Dependency(atom=atom, onlydeps=onlydeps,
5665                                         root=myroot, parent=arg)
5666                                 atom_cp = portage.dep_getkey(atom)
5667                                 try:
5668                                         pprovided = pprovideddict.get(portage.dep_getkey(atom))
5669                                         if pprovided and portage.match_from_list(atom, pprovided):
5670                                                 # A provided package has been specified on the command line.
5671                                                 self._pprovided_args.append((arg, atom))
5672                                                 continue
5673                                         if isinstance(arg, PackageArg):
5674                                                 if not self._add_pkg(arg.package, dep) or \
5675                                                         not self._create_graph():
5676                                                         sys.stderr.write(("\n\n!!! Problem resolving " + \
5677                                                                 "dependencies for %s\n") % arg.arg)
5678                                                         return 0, myfavorites
5679                                                 continue
5680                                         if debug:
5681                                                 portage.writemsg("      Arg: %s\n     Atom: %s\n" % \
5682                                                         (arg, atom), noiselevel=-1)
5683                                         pkg, existing_node = self._select_package(
5684                                                 myroot, atom, onlydeps=onlydeps)
5685                                         if not pkg:
5686                                                 if not (isinstance(arg, SetArg) and \
5687                                                         arg.name in ("system", "world")):
5688                                                         self._unsatisfied_deps_for_display.append(
5689                                                                 ((myroot, atom), {}))
5690                                                         return 0, myfavorites
5691                                                 self._missing_args.append((arg, atom))
5692                                                 continue
5693                                         if atom_cp != pkg.cp:
5694                                                 # For old-style virtuals, we need to repeat the
5695                                                 # package.provided check against the selected package.
5696                                                 expanded_atom = atom.replace(atom_cp, pkg.cp)
5697                                                 pprovided = pprovideddict.get(pkg.cp)
5698                                                 if pprovided and \
5699                                                         portage.match_from_list(expanded_atom, pprovided):
5700                                                         # A provided package has been
5701                                                         # specified on the command line.
5702                                                         self._pprovided_args.append((arg, atom))
5703                                                         continue
5704                                         if pkg.installed and "selective" not in self.myparams:
5705                                                 self._unsatisfied_deps_for_display.append(
5706                                                         ((myroot, atom), {}))
5707                                                 # Previous behavior was to bail out in this case, but
5708                                                 # since the dep is satisfied by the installed package,
5709                                                 # it's more friendly to continue building the graph
5710                                                 # and just show a warning message. Therefore, only bail
5711                                                 # out here if the atom is not from either the system or
5712                                                 # world set.
5713                                                 if not (isinstance(arg, SetArg) and \
5714                                                         arg.name in ("system", "world")):
5715                                                         return 0, myfavorites
5716
5717                                         # Add the selected package to the graph as soon as possible
5718                                         # so that later dep_check() calls can use it as feedback
5719                                         # for making more consistent atom selections.
5720                                         if not self._add_pkg(pkg, dep):
5721                                                 if isinstance(arg, SetArg):
5722                                                         sys.stderr.write(("\n\n!!! Problem resolving " + \
5723                                                                 "dependencies for %s from %s\n") % \
5724                                                                 (atom, arg.arg))
5725                                                 else:
5726                                                         sys.stderr.write(("\n\n!!! Problem resolving " + \
5727                                                                 "dependencies for %s\n") % atom)
5728                                                 return 0, myfavorites
5729
5730                                 except portage.exception.MissingSignature, e:
5731                                         portage.writemsg("\n\n!!! A missing gpg signature is preventing portage from calculating the\n")
5732                                         portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
5733                                         portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
5734                                         portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
5735                                         portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
5736                                         return 0, myfavorites
5737                                 except portage.exception.InvalidSignature, e:
5738                                         portage.writemsg("\n\n!!! An invalid gpg signature is preventing portage from calculating the\n")
5739                                         portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
5740                                         portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
5741                                         portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
5742                                         portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
5743                                         return 0, myfavorites
5744                                 except SystemExit, e:
5745                                         raise # Needed else can't exit
5746                                 except Exception, e:
5747                                         print >> sys.stderr, "\n\n!!! Problem in '%s' dependencies." % atom
5748                                         print >> sys.stderr, "!!!", str(e), getattr(e, "__module__", None)
5749                                         raise
5750
5751                 # Now that the root packages have been added to the graph,
5752                 # process the dependencies.
5753                 if not self._create_graph():
5754                         return 0, myfavorites
5755
5756                 missing = 0
5757                 if "--usepkgonly" in self.myopts:
5758                         for xs in self.digraph.all_nodes():
5759                                 if not isinstance(xs, Package):
5760                                         continue
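                                             # Package instances index like their (type_name, root, cpv,
                                             # operation) hash key, so xs[0] is the type, xs[2] the cpv and
                                             # xs[3] the operation.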
5761                                 if len(xs) >= 4 and xs[0] != "binary" and xs[3] == "merge":
5762                                         if missing == 0:
5763                                                 print
5764                                         missing += 1
5765                                         print "Missing binary for:", xs[2]
5766
5767                 try:
5768                         self.altlist()
5769                 except self._unknown_internal_error:
5770                         return False, myfavorites
5771
5772                 # The return value is True unless binaries are missing.
5773                 return (not missing, myfavorites)
5774
5775         def _set_args(self, args):
5776                 """
5777                 Create the "args" package set from atoms and packages given as
5778                 arguments. This method can be called multiple times if necessary.
5779                 The package selection cache is automatically invalidated, since
5780                 arguments influence package selections.
5781                 """
5782                 args_set = self._sets["args"]
5783                 args_set.clear()
5784                 for arg in args:
5785                         if not isinstance(arg, (AtomArg, PackageArg)):
5786                                 continue
5787                         atom = arg.atom
5788                         if atom in args_set:
5789                                 continue
5790                         args_set.add(atom)
5791
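                     # Rebuild the flattened set of atoms from every registered package set,
                     # then rebuild the reverse map from (atom, root) to the arguments that
                     # supplied that atom.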
5792                 self._set_atoms.clear()
5793                 self._set_atoms.update(chain(*self._sets.itervalues()))
5794                 atom_arg_map = self._atom_arg_map
5795                 atom_arg_map.clear()
5796                 for arg in args:
5797                         for atom in arg.set:
5798                                 atom_key = (atom, arg.root_config.root)
5799                                 refs = atom_arg_map.get(atom_key)
5800                                 if refs is None:
5801                                         refs = []
5802                                         atom_arg_map[atom_key] = refs
5803                                 if arg not in refs:
5804                                         refs.append(arg)
5805
5806                 # Invalidate the package selection cache, since
5807                 # arguments influence package selections.
5808                 self._highest_pkg_cache.clear()
5809                 for trees in self._filtered_trees.itervalues():
5810                         trees["porttree"].dbapi._clear_cache()
5811
5812         def _greedy_slots(self, root_config, atom, blocker_lookahead=False):
5813                 """
5814                 Return a list of slot atoms corresponding to installed slots that
5815                 differ from the slot of the highest visible match. When
5816                 blocker_lookahead is True, slot atoms that would trigger a blocker
5817                 conflict are automatically discarded, potentially allowing automatic
5818                 uninstallation of older slots when appropriate.
5819                 """
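                     # Hypothetical example: with sys-libs/db:4.5 installed while the highest
                     # visible match is in SLOT 4.6, the sys-libs/db:4.5 slot atom is returned
                     # so that the older slot can also be kept up to date.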
5820                 highest_pkg, in_graph = self._select_package(root_config.root, atom)
5821                 if highest_pkg is None:
5822                         return []
5823                 vardb = root_config.trees["vartree"].dbapi
5824                 slots = set()
5825                 for cpv in vardb.match(atom):
5826                         # don't mix new virtuals with old virtuals
5827                         if portage.cpv_getkey(cpv) == highest_pkg.cp:
5828                                 slots.add(vardb.aux_get(cpv, ["SLOT"])[0])
5829
5830                 slots.add(highest_pkg.metadata["SLOT"])
5831                 if len(slots) == 1:
5832                         return []
5833                 greedy_pkgs = []
5834                 slots.remove(highest_pkg.metadata["SLOT"])
5835                 while slots:
5836                         slot = slots.pop()
5837                         slot_atom = portage.dep.Atom("%s:%s" % (highest_pkg.cp, slot))
5838                         pkg, in_graph = self._select_package(root_config.root, slot_atom)
5839                         if pkg is not None and \
5840                                 pkg.cp == highest_pkg.cp and pkg < highest_pkg:
5841                                 greedy_pkgs.append(pkg)
5842                 if not greedy_pkgs:
5843                         return []
5844                 if not blocker_lookahead:
5845                         return [pkg.slot_atom for pkg in greedy_pkgs]
5846
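                     # With blocker lookahead, gather each candidate's blocker atoms from its
                     # DEPEND, PDEPEND and RDEPEND so that slots which block (or are blocked
                     # by) the highest visible match can be discarded below.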
5847                 blockers = {}
5848                 blocker_dep_keys = ["DEPEND", "PDEPEND", "RDEPEND"]
5849                 for pkg in greedy_pkgs + [highest_pkg]:
5850                         dep_str = " ".join(pkg.metadata[k] for k in blocker_dep_keys)
5851                         try:
5852                                 atoms = self._select_atoms(
5853                                         pkg.root, dep_str, pkg.use.enabled,
5854                                         parent=pkg, strict=True)
5855                         except portage.exception.InvalidDependString:
5856                                 continue
5857                         blocker_atoms = (x for x in atoms if x.blocker)
5858                         blockers[pkg] = InternalPackageSet(initial_atoms=blocker_atoms)
5859
5860                 if highest_pkg not in blockers:
5861                         return []
5862
5863                 # filter packages with invalid deps
5864                 greedy_pkgs = [pkg for pkg in greedy_pkgs if pkg in blockers]
5865
5866                 # filter packages that conflict with highest_pkg
5867                 greedy_pkgs = [pkg for pkg in greedy_pkgs if not \
5868                         (blockers[highest_pkg].findAtomForPackage(pkg) or \
5869                         blockers[pkg].findAtomForPackage(highest_pkg))]
5870
5871                 if not greedy_pkgs:
5872                         return []
5873
5874                 # If two packages conflict, discard the lower version.
5875                 discard_pkgs = set()
5876                 greedy_pkgs.sort(reverse=True)
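                     # The list is sorted highest version first, so whenever two remaining
                     # packages block each other the later (lower) one is the one discarded.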
5877                 for i in xrange(len(greedy_pkgs) - 1):
5878                         pkg1 = greedy_pkgs[i]
5879                         if pkg1 in discard_pkgs:
5880                                 continue
5881                         for j in xrange(i + 1, len(greedy_pkgs)):
5882                                 pkg2 = greedy_pkgs[j]
5883                                 if pkg2 in discard_pkgs:
5884                                         continue
5885                                 if blockers[pkg1].findAtomForPackage(pkg2) or \
5886                                         blockers[pkg2].findAtomForPackage(pkg1):
5887                                         # pkg1 > pkg2
5888                                         discard_pkgs.add(pkg2)
5889
5890                 return [pkg.slot_atom for pkg in greedy_pkgs \
5891                         if pkg not in discard_pkgs]
5892
5893         def _select_atoms_from_graph(self, *pargs, **kwargs):
5894                 """
5895                 Prefer atoms matching packages that have already been
5896                 added to the graph or those that are installed and have
5897                 not been scheduled for replacement.
5898                 """
5899                 kwargs["trees"] = self._graph_trees
5900                 return self._select_atoms_highest_available(*pargs, **kwargs)
5901
5902         def _select_atoms_highest_available(self, root, depstring,
5903                 myuse=None, parent=None, strict=True, trees=None, priority=None):
5904                 """This will raise InvalidDependString if necessary. If trees is
5905                 None then self._filtered_trees is used."""
5906                 pkgsettings = self.pkgsettings[root]
5907                 if trees is None:
5908                         trees = self._filtered_trees
5909                 if not getattr(priority, "buildtime", False):
5910                         # The parent should only be passed to dep_check() for buildtime
5911                         # dependencies since that's the only case when it's appropriate
5912                         # to trigger the circular dependency avoidance code which uses it.
5913                         # It's important not to trigger the same circular dependency
5914                         # avoidance code for runtime dependencies since it's not needed
5915                         # and it can promote an incorrect package choice.
5916                         parent = None
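                     # dep_check() reads the parent out of the trees dict and honors the
                     # module-level portage.dep._dep_check_strict flag, so both are set
                     # temporarily here and restored in the finally clause.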
5917
5918                 try:
5919                         if parent is not None:
5920                                 trees[root]["parent"] = parent
5921                         if not strict:
5922                                 portage.dep._dep_check_strict = False
5923                         mycheck = portage.dep_check(depstring, None,
5924                                 pkgsettings, myuse=myuse,
5925                                 myroot=root, trees=trees)
5926                 finally:
5927                         if parent is not None:
5928                                 trees[root].pop("parent")
5929                         portage.dep._dep_check_strict = True
5930                 if not mycheck[0]:
5931                         raise portage.exception.InvalidDependString(mycheck[1])
5932                 selected_atoms = mycheck[1]
5933                 return selected_atoms
5934
5935         def _show_unsatisfied_dep(self, root, atom, myparent=None, arg=None):
5936                 atom = portage.dep.Atom(atom)
5937                 atom_set = InternalPackageSet(initial_atoms=(atom,))
5938                 atom_without_use = atom
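                     # remove_slot() strips the slot along with any USE-dependency suffix,
                     # so the slot (if any) is appended back before the Atom is rebuilt.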
5939                 if atom.use:
5940                         atom_without_use = portage.dep.remove_slot(atom)
5941                         if atom.slot:
5942                                 atom_without_use += ":" + atom.slot
5943                         atom_without_use = portage.dep.Atom(atom_without_use)
5944                 xinfo = '"%s"' % atom
5945                 if arg:
5946                         xinfo = '"%s"' % arg
5947                 # Discard null/ from failed cpv_expand category expansion.
5948                 xinfo = xinfo.replace("null/", "")
5949                 masked_packages = []
5950                 missing_use = []
5951                 masked_pkg_instances = set()
5952                 missing_licenses = []
5953                 have_eapi_mask = False
5954                 pkgsettings = self.pkgsettings[root]
5955                 implicit_iuse = pkgsettings._get_implicit_iuse()
5956                 root_config = self.roots[root]
5957                 portdb = self.roots[root].trees["porttree"].dbapi
5958                 dbs = self._filtered_trees[root]["dbs"]
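                     # Scan every non-installed db for candidate versions, collecting mask
                     # reasons and, for atoms with USE deps, the USE changes that would be
                     # required to satisfy them.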
5959                 for db, pkg_type, built, installed, db_keys in dbs:
5960                         if installed:
5961                                 continue
5962                         match = db.match
5963                         if hasattr(db, "xmatch"):
5964                                 cpv_list = db.xmatch("match-all", atom_without_use)
5965                         else:
5966                                 cpv_list = db.match(atom_without_use)
5967                         # descending order
5968                         cpv_list.reverse()
5969                         for cpv in cpv_list:
5970                                 metadata, mreasons = get_mask_info(root_config, cpv,
5971                                         pkgsettings, db, pkg_type, built, installed, db_keys)
5972                                 if metadata is not None:
5973                                         pkg = Package(built=built, cpv=cpv,
5974                                                 installed=installed, metadata=metadata,
5975                                                 root_config=root_config)
5976                                         if pkg.cp != atom.cp:
5977                                                 # A cpv can be returned from dbapi.match() as an
5978                                                 # old-style virtual match even in cases when the
5979                                                 # package does not actually PROVIDE the virtual.
5980                                                 # Filter out any such false matches here.
5981                                                 if not atom_set.findAtomForPackage(pkg):
5982                                                         continue
5983                                         if mreasons:
5984                                                 masked_pkg_instances.add(pkg)
5985                                         if atom.use:
5986                                                 missing_use.append(pkg)
5987                                                 if not mreasons:
5988                                                         continue
5989                                 masked_packages.append(
5990                                         (root_config, pkgsettings, cpv, metadata, mreasons))
5991
5992                 missing_use_reasons = []
5993                 missing_iuse_reasons = []
5994                 for pkg in missing_use:
5995                         use = pkg.use.enabled
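                             # Implicit IUSE (flags the profile provides without listing them in
                             # IUSE, e.g. ARCH and USE_EXPAND values) is unioned in so those
                             # flags are not reported as missing.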
5996                         iuse = implicit_iuse.union(re.escape(x) for x in pkg.iuse.all)
5997                         iuse_re = re.compile("^(%s)$" % "|".join(iuse))
5998                         missing_iuse = []
5999                         for x in atom.use.required:
6000                                 if iuse_re.match(x) is None:
6001                                         missing_iuse.append(x)
6002                         mreasons = []
6003                         if missing_iuse:
6004                                 mreasons.append("Missing IUSE: %s" % " ".join(missing_iuse))
6005                                 missing_iuse_reasons.append((pkg, mreasons))
6006                         else:
6007                                 need_enable = sorted(atom.use.enabled.difference(use))
6008                                 need_disable = sorted(atom.use.disabled.intersection(use))
6009                                 if need_enable or need_disable:
6010                                         changes = []
6011                                         changes.extend(colorize("red", "+" + x) \
6012                                                 for x in need_enable)
6013                                         changes.extend(colorize("blue", "-" + x) \
6014                                                 for x in need_disable)
6015                                         mreasons.append("Change USE: %s" % " ".join(changes))
6016                                         missing_use_reasons.append((pkg, mreasons))
6017
6018                 unmasked_use_reasons = [(pkg, mreasons) for (pkg, mreasons) \
6019                         in missing_use_reasons if pkg not in masked_pkg_instances]
6020
6021                 unmasked_iuse_reasons = [(pkg, mreasons) for (pkg, mreasons) \
6022                         in missing_iuse_reasons if pkg not in masked_pkg_instances]
6023
6024                 show_missing_use = False
6025                 if unmasked_use_reasons:
6026                         # Only show the latest version.
6027                         show_missing_use = unmasked_use_reasons[:1]
6028                 elif unmasked_iuse_reasons:
6029                         if missing_use_reasons:
6030                                 # All packages with required IUSE are masked,
6031                                 # so display a normal masking message.
6032                                 pass
6033                         else:
6034                                 show_missing_use = unmasked_iuse_reasons
6035
6036                 if show_missing_use:
6037                         print "\nemerge: there are no ebuilds built with USE flags to satisfy "+green(xinfo)+"."
6038                         print "!!! One of the following packages is required to complete your request:"
6039                         for pkg, mreasons in show_missing_use:
6040                                 print "- "+pkg.cpv+" ("+", ".join(mreasons)+")"
6041
6042                 elif masked_packages:
6043                         print "\n!!! " + \
6044                                 colorize("BAD", "All ebuilds that could satisfy ") + \
6045                                 colorize("INFORM", xinfo) + \
6046                                 colorize("BAD", " have been masked.")
6047                         print "!!! One of the following masked packages is required to complete your request:"
6048                         have_eapi_mask = show_masked_packages(masked_packages)
6049                         if have_eapi_mask:
6050                                 print
6051                                 msg = ("The current version of portage supports " + \
6052                                         "EAPI '%s'. You must upgrade to a newer version" + \
6053                                         " of portage before EAPI masked packages can" + \
6054                                         " be installed.") % portage.const.EAPI
6055                                 from textwrap import wrap
6056                                 for line in wrap(msg, 75):
6057                                         print line
6058                         print
6059                         show_mask_docs()
6060                 else:
6061                         print "\nemerge: there are no ebuilds to satisfy "+green(xinfo)+"."
6062
6063                 # Show parent nodes and the argument that pulled them in.
6064                 traversed_nodes = set()
6065                 node = myparent
6066                 msg = []
6067                 while node is not None:
6068                         traversed_nodes.add(node)
6069                         msg.append('(dependency required by "%s" [%s])' % \
6070                                 (colorize('INFORM', str(node.cpv)), node.type_name))
6071                         # When traversing to parents, prefer arguments over packages
6072                         # since arguments are root nodes. Never traverse the same
6073                         # package twice, in order to prevent an infinite loop.
6074                         selected_parent = None
6075                         for parent in self.digraph.parent_nodes(node):
6076                                 if isinstance(parent, DependencyArg):
6077                                         msg.append('(dependency required by "%s" [argument])' % \
6078                                                 (colorize('INFORM', str(parent))))
6079                                         selected_parent = None
6080                                         break
6081                                 if parent not in traversed_nodes:
6082                                         selected_parent = parent
6083                         node = selected_parent
6084                 for line in msg:
6085                         print line
6086
6087                 print
6088
6089         def _select_pkg_highest_available(self, root, atom, onlydeps=False):
6090                 cache_key = (root, atom, onlydeps)
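                     # Package selections are memoized per (root, atom, onlydeps); the
                     # cached value is a (package, existing graph node) pair.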
6091                 ret = self._highest_pkg_cache.get(cache_key)
6092                 if ret is not None:
6093                         pkg, existing = ret
6094                         if pkg and not existing:
6095                                 existing = self._slot_pkg_map[root].get(pkg.slot_atom)
6096                                 if existing and existing == pkg:
6097                                         # Update the cache to reflect that the
6098                                         # package has been added to the graph.
6099                                         ret = pkg, pkg
6100                                         self._highest_pkg_cache[cache_key] = ret
6101                         return ret
6102                 ret = self._select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)
6103                 self._highest_pkg_cache[cache_key] = ret
6104                 pkg, existing = ret
6105                 if pkg is not None:
6106                         settings = pkg.root_config.settings
6107                         if visible(settings, pkg) and not (pkg.installed and \
6108                                 settings._getMissingKeywords(pkg.cpv, pkg.metadata)):
6109                                 pkg.root_config.visible_pkgs.cpv_inject(pkg)
6110                 return ret
6111
6112         def _select_pkg_highest_available_imp(self, root, atom, onlydeps=False):
6113                 root_config = self.roots[root]
6114                 pkgsettings = self.pkgsettings[root]
6115                 dbs = self._filtered_trees[root]["dbs"]
6116                 vardb = self.roots[root].trees["vartree"].dbapi
6117                 portdb = self.roots[root].trees["porttree"].dbapi
6118                 # List of acceptable packages, ordered by type preference.
6119                 matched_packages = []
6120                 highest_version = None
6121                 if not isinstance(atom, portage.dep.Atom):
6122                         atom = portage.dep.Atom(atom)
6123                 atom_cp = atom.cp
6124                 atom_set = InternalPackageSet(initial_atoms=(atom,))
6125                 existing_node = None
6126                 myeb = None
6127                 usepkgonly = "--usepkgonly" in self.myopts
6128                 empty = "empty" in self.myparams
6129                 selective = "selective" in self.myparams
6130                 reinstall = False
6131                 noreplace = "--noreplace" in self.myopts
6132                 # Behavior of the "selective" parameter depends on
6133                 # whether or not a package matches an argument atom.
6134                 # If an installed package provides an old-style
6135                 # virtual that is no longer provided by an available
6136                 # package, the installed package may match an argument
6137                 # atom even though none of the available packages do.
6138                 # Therefore, "selective" logic does not consider
6139                 # whether or not an installed package matches an
6140                 # argument atom. It only considers whether or not
6141                 # available packages match argument atoms, which is
6142                 # represented by the found_available_arg flag.
6143                 found_available_arg = False
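                     # Two passes over the dbs: the first prefers a package matching a node
                     # that is already in the graph (existing_node), the second falls back
                     # to ordinary highest-available selection.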
6144                 for find_existing_node in True, False:
6145                         if existing_node:
6146                                 break
6147                         for db, pkg_type, built, installed, db_keys in dbs:
6148                                 if existing_node:
6149                                         break
6150                                 if installed and not find_existing_node:
6151                                         want_reinstall = reinstall or empty or \
6152                                                 (found_available_arg and not selective)
6153                                         if want_reinstall and matched_packages:
6154                                                 continue
6155                                 if hasattr(db, "xmatch"):
6156                                         cpv_list = db.xmatch("match-all", atom)
6157                                 else:
6158                                         cpv_list = db.match(atom)
6159
6160                                 # USE=multislot can make an installed package appear as if
6161                                 # it doesn't satisfy a slot dependency. Rebuilding the ebuild
6162                                 # won't do any good as long as USE=multislot is enabled since
6163                                 # the newly built package still won't have the expected slot.
6164                                 # Therefore, assume that such SLOT dependencies are already
6165                                 # satisfied rather than forcing a rebuild.
6166                                 if installed and not cpv_list and atom.slot:
6167                                         for cpv in db.match(atom.cp):
6168                                                 slot_available = False
6169                                                 for other_db, other_type, other_built, \
6170                                                         other_installed, other_keys in dbs:
6171                                                         try:
6172                                                                 if atom.slot == \
6173                                                                         other_db.aux_get(cpv, ["SLOT"])[0]:
6174                                                                         slot_available = True
6175                                                                         break
6176                                                         except KeyError:
6177                                                                 pass
6178                                                 if not slot_available:
6179                                                         continue
6180                                                 inst_pkg = self._pkg(cpv, "installed",
6181                                                         root_config, installed=installed)
6182                                                 # Remove the slot from the atom and verify that
6183                                                 # the package matches the resulting atom.
6184                                                 atom_without_slot = portage.dep.remove_slot(atom)
6185                                                 if atom.use:
6186                                                         atom_without_slot += str(atom.use)
6187                                                 atom_without_slot = portage.dep.Atom(atom_without_slot)
6188                                                 if portage.match_from_list(
6189                                                         atom_without_slot, [inst_pkg]):
6190                                                         cpv_list = [inst_pkg.cpv]
6191                                                 break
6192
6193                                 if not cpv_list:
6194                                         continue
6195                                 pkg_status = "merge"
6196                                 if installed or onlydeps:
6197                                         pkg_status = "nomerge"
6198                                 # descending order
6199                                 cpv_list.reverse()
6200                                 for cpv in cpv_list:
6201                                         # Make --noreplace take precedence over --newuse.
6202                                         if not installed and noreplace and \
6203                                                 cpv in vardb.match(atom):
6204                                                 # If the installed version is masked, it may
6205                                                 # be necessary to look at lower versions,
6206                                                 # in case there is a visible downgrade.
6207                                                 continue
6208                                         reinstall_for_flags = None
6209                                         cache_key = (pkg_type, root, cpv, pkg_status)
6210                                         calculated_use = True
6211                                         pkg = self._pkg_cache.get(cache_key)
6212                                         if pkg is None:
6213                                                 calculated_use = False
6214                                                 try:
6215                                                         metadata = izip(db_keys, db.aux_get(cpv, db_keys))
6216                                                 except KeyError:
6217                                                         continue
6218                                                 pkg = Package(built=built, cpv=cpv,
6219                                                         installed=installed, metadata=metadata,
6220                                                         onlydeps=onlydeps, root_config=root_config,
6221                                                         type_name=pkg_type)
6222                                                 metadata = pkg.metadata
6223                                                 if not built:
6224                                                         metadata['CHOST'] = pkgsettings.get('CHOST', '')
6225                                                 if not built and ("?" in metadata["LICENSE"] or \
6226                                                         "?" in metadata["PROVIDE"]):
6227                                                         # This is avoided whenever possible because
6228                                                         # it's expensive. It only needs to be done here
6229                                                         # if it has an effect on visibility.
6230                                                         pkgsettings.setcpv(pkg)
6231                                                         metadata["USE"] = pkgsettings["PORTAGE_USE"]
6232                                                         calculated_use = True
6233                                                 self._pkg_cache[pkg] = pkg
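                                                                     # Package instances hash like the (type, root, cpv, status)
                                                                     # tuple used for cache_key above, so the instance itself
                                                                     # serves as the cache key here.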
6234
6235                                         if not installed or (built and matched_packages):
6236                                                 # Only enforce visibility on installed packages
6237                                                 # if there is at least one other visible package
6238                                                 # available. By filtering installed masked packages
6239                                                 # here, packages that have been masked since they
6240                                                 # were installed can be automatically downgraded
6241                                                 # to an unmasked version.
6242                                                 try:
6243                                                         if not visible(pkgsettings, pkg):
6244                                                                 continue
6245                                                 except portage.exception.InvalidDependString:
6246                                                         if not installed:
6247                                                                 continue
6248
6249                                                 # Enable upgrade or downgrade to a version
6250                                                 # with visible KEYWORDS when the installed
6251                                                 # version is masked by KEYWORDS, but never
6252                                                 # reinstall the same exact version only due
6253                                                 # to a KEYWORDS mask.
6254                                                 if built and matched_packages:
6255
6256                                                         different_version = None
6257                                                         for avail_pkg in matched_packages:
6258                                                                 if not portage.dep.cpvequal(
6259                                                                         pkg.cpv, avail_pkg.cpv):
6260                                                                         different_version = avail_pkg
6261                                                                         break
6262                                                         if different_version is not None:
6263
6264                                                                 if installed and \
6265                                                                         pkgsettings._getMissingKeywords(
6266                                                                         pkg.cpv, pkg.metadata):
6267                                                                         continue
6268
6269                                                                 # If the ebuild no longer exists or its
6270                                                                 # keywords have been dropped, reject built
6271                                                                 # instances (installed or binary).
6272                                                                 # If --usepkgonly is enabled, assume that
6273                                                                 # the ebuild status should be ignored.
6274                                                                 if not usepkgonly:
6275                                                                         try:
6276                                                                                 pkg_eb = self._pkg(
6277                                                                                         pkg.cpv, "ebuild", root_config)
6278                                                                         except portage.exception.PackageNotFound:
6279                                                                                 continue
6280                                                                         else:
6281                                                                                 if not visible(pkgsettings, pkg_eb):
6282                                                                                         continue
6283
6284                                         if not pkg.built and not calculated_use:
6285                                                 # This is avoided whenever possible because
6286                                                 # it's expensive.
6287                                                 pkgsettings.setcpv(pkg)
6288                                                 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
6289
6290                                         if pkg.cp != atom.cp:
6291                                                 # A cpv can be returned from dbapi.match() as an
6292                                                 # old-style virtual match even in cases when the
6293                                                 # package does not actually PROVIDE the virtual.
6294                                                 # Filter out any such false matches here.
6295                                                 if not atom_set.findAtomForPackage(pkg):
6296                                                         continue
6297
6298                                         myarg = None
6299                                         if root == self.target_root:
6300                                                 try:
6301                                                         # Ebuild USE must have been calculated prior
6302                                                         # to this point, in case atoms have USE deps.
6303                                                         myarg = self._iter_atoms_for_pkg(pkg).next()
6304                                                 except StopIteration:
6305                                                         pass
6306                                                 except portage.exception.InvalidDependString:
6307                                                         if not installed:
6308                                                                 # masked by corruption
6309                                                                 continue
6310                                         if not installed and myarg:
6311                                                 found_available_arg = True
6312
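                                             # For packages that are not yet built, reject candidates whose
                                             # computed USE cannot satisfy the atom's USE dependencies.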
6313                                         if atom.use and not pkg.built:
6314                                                 use = pkg.use.enabled
6315                                                 if atom.use.enabled.difference(use):
6316                                                         continue
6317                                                 if atom.use.disabled.intersection(use):
6318                                                         continue
6319                                         if pkg.cp == atom_cp:
6320                                                 if highest_version is None:
6321                                                         highest_version = pkg
6322                                                 elif pkg > highest_version:
6323                                                         highest_version = pkg
6324                                         # At this point, we've found the highest visible
6325                                         # match from the current repo. Any lower versions
6326                                         # from this repo are ignored, so the loop
6327                                         # will always end with a break statement below
6328                                         # this point.
6329                                         if find_existing_node:
6330                                                 e_pkg = self._slot_pkg_map[root].get(pkg.slot_atom)
6331                                                 if not e_pkg:
6332                                                         break
6333                                                 if portage.dep.match_from_list(atom, [e_pkg]):
6334                                                         if highest_version and \
6335                                                                 e_pkg.cp == atom_cp and \
6336                                                                 e_pkg < highest_version and \
6337                                                                 e_pkg.slot_atom != highest_version.slot_atom:
6338                                                                 # There is a higher version available in a
6339                                                                 # different slot, so this existing node is
6340                                                                 # irrelevant.
6341                                                                 pass
6342                                                         else:
6343                                                                 matched_packages.append(e_pkg)
6344                                                                 existing_node = e_pkg
6345                                                 break
6346                                         # Compare built package to current config and
6347                                         # reject the built package if necessary.
6348                                         if built and not installed and \
6349                                                 ("--newuse" in self.myopts or \
6350                                                 "--reinstall" in self.myopts):
6351                                                 iuses = pkg.iuse.all
6352                                                 old_use = pkg.use.enabled
6353                                                 if myeb:
6354                                                         pkgsettings.setcpv(myeb)
6355                                                 else:
6356                                                         pkgsettings.setcpv(pkg)
6357                                                 now_use = pkgsettings["PORTAGE_USE"].split()
6358                                                 forced_flags = set()
6359                                                 forced_flags.update(pkgsettings.useforce)
6360                                                 forced_flags.update(pkgsettings.usemask)
6361                                                 cur_iuse = iuses
6362                                                 if myeb and not usepkgonly:
6363                                                         cur_iuse = myeb.iuse.all
6364                                                 if self._reinstall_for_flags(forced_flags,
6365                                                         old_use, iuses,
6366                                                         now_use, cur_iuse):
6367                                                         break
6368                                         # Compare current config to installed package
6369                                         # and do not reinstall if possible.
6370                                         if not installed and \
6371                                                 ("--newuse" in self.myopts or \
6372                                                 "--reinstall" in self.myopts) and \
6373                                                 cpv in vardb.match(atom):
6374                                                 pkgsettings.setcpv(pkg)
6375                                                 forced_flags = set()
6376                                                 forced_flags.update(pkgsettings.useforce)
6377                                                 forced_flags.update(pkgsettings.usemask)
6378                                                 old_use = vardb.aux_get(cpv, ["USE"])[0].split()
6379                                                 old_iuse = set(filter_iuse_defaults(
6380                                                         vardb.aux_get(cpv, ["IUSE"])[0].split()))
6381                                                 cur_use = pkgsettings["PORTAGE_USE"].split()
6382                                                 cur_iuse = pkg.iuse.all
6383                                                 reinstall_for_flags = \
6384                                                         self._reinstall_for_flags(
6385                                                         forced_flags, old_use, old_iuse,
6386                                                         cur_use, cur_iuse)
6387                                                 if reinstall_for_flags:
6388                                                         reinstall = True
6389                                         if not built:
6390                                                 myeb = pkg
6391                                         matched_packages.append(pkg)
6392                                         if reinstall_for_flags:
6393                                                 self._reinstall_nodes[pkg] = \
6394                                                         reinstall_for_flags
6395                                         break
6396
6397                 if not matched_packages:
6398                         return None, None
6399
6400                 if "--debug" in self.myopts:
6401                         for pkg in matched_packages:
6402                                 portage.writemsg("%s %s\n" % \
6403                                         ((pkg.type_name + ":").rjust(10), pkg.cpv), noiselevel=-1)
6404
6405                 # Filter out any old-style virtual matches if they are
6406                 # mixed with new-style virtual matches.
6407                 cp = portage.dep_getkey(atom)
6408                 if len(matched_packages) > 1 and \
6409                         "virtual" == portage.catsplit(cp)[0]:
6410                         for pkg in matched_packages:
6411                                 if pkg.cp != cp:
6412                                         continue
6413                                 # Got a new-style virtual, so filter
6414                                 # out any old-style virtuals.
6415                                 matched_packages = [pkg for pkg in matched_packages \
6416                                         if pkg.cp == cp]
6417                                 break
6418
6419                 if len(matched_packages) > 1:
6420                         bestmatch = portage.best(
6421                                 [pkg.cpv for pkg in matched_packages])
6422                         matched_packages = [pkg for pkg in matched_packages \
6423                                 if portage.dep.cpvequal(pkg.cpv, bestmatch)]
6424
6425                 # ordered by type preference ("ebuild" type is the last resort)
6426                 return matched_packages[-1], existing_node
6427
6428         def _select_pkg_from_graph(self, root, atom, onlydeps=False):
6429                 """
6430                 Select packages that have already been added to the graph or
6431                 those that are installed and have not been scheduled for
6432                 replacement.
6433                 """
6434                 graph_db = self._graph_trees[root]["porttree"].dbapi
6435                 matches = graph_db.match_pkgs(atom)
6436                 if not matches:
6437                         return None, None
6438                 pkg = matches[-1] # highest match
6439                 in_graph = self._slot_pkg_map[root].get(pkg.slot_atom)
6440                 return pkg, in_graph
6441
6442         def _complete_graph(self):
6443                 """
6444                 Add any deep dependencies of required sets (args, system, world) that
6445                 have not been pulled into the graph yet. This ensures that the graph
6446                 is consistent such that initially satisfied deep dependencies are not
6447                 broken in the new graph. Initially unsatisfied dependencies are
6448                 irrelevant since we only want to avoid breaking dependencies that are
6449                 initially satisfied.
6450
6451                 Since this method can consume enough time to disturb users, it is
6452                 currently only enabled by the --complete-graph option.
6453                 """
6454                 if "--buildpkgonly" in self.myopts or \
6455                         "recurse" not in self.myparams:
6456                         return 1
6457
6458                 if "complete" not in self.myparams:
6459                         # Skip this to avoid consuming enough time to disturb users.
6460                         return 1
6461
6462                 # Put the depgraph into a mode that causes it to only
6463                 # select packages that have already been added to the
6464                 # graph or those that are installed and have not been
6465                 # scheduled for replacement. Also, toggle the "deep"
6466                 # parameter so that all dependencies are traversed and
6467                 # accounted for.
6468                 self._select_atoms = self._select_atoms_from_graph
6469                 self._select_package = self._select_pkg_from_graph
6470                 already_deep = "deep" in self.myparams
6471                 if not already_deep:
6472                         self.myparams.add("deep")
6473
6474                 for root in self.roots:
6475                         required_set_names = self._required_set_names.copy()
6476                         if root == self.target_root and \
6477                                 (already_deep or "empty" in self.myparams):
6478                                 required_set_names.difference_update(self._sets)
6479                         if not required_set_names and not self._ignored_deps:
6480                                 continue
6481                         root_config = self.roots[root]
6482                         setconfig = root_config.setconfig
6483                         args = []
6484                         # Reuse existing SetArg instances when available.
6485                         for arg in self.digraph.root_nodes():
6486                                 if not isinstance(arg, SetArg):
6487                                         continue
6488                                 if arg.root_config != root_config:
6489                                         continue
6490                                 if arg.name in required_set_names:
6491                                         args.append(arg)
6492                                         required_set_names.remove(arg.name)
6493                         # Create new SetArg instances only when necessary.
6494                         for s in required_set_names:
6495                                 expanded_set = InternalPackageSet(
6496                                         initial_atoms=setconfig.getSetAtoms(s))
6497                                 atom = SETPREFIX + s
6498                                 args.append(SetArg(arg=atom, set=expanded_set,
6499                                         root_config=root_config))
6500                         vardb = root_config.trees["vartree"].dbapi
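                             # Re-inject every atom of the required sets as a Dependency so
                             # that _create_graph() traverses their deep dependencies; vardb
                             # is consulted below to tell deps that were initially satisfied
                             # apart from ones that were already broken beforehand.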
6501                         for arg in args:
6502                                 for atom in arg.set:
6503                                         self._dep_stack.append(
6504                                                 Dependency(atom=atom, root=root, parent=arg))
6505                         if self._ignored_deps:
6506                                 self._dep_stack.extend(self._ignored_deps)
6507                                 self._ignored_deps = []
6508                         if not self._create_graph(allow_unsatisfied=True):
6509                                 return 0
6510                         # Check the unsatisfied deps to see if any initially satisfied deps
6511                         # will become unsatisfied due to an upgrade. Initially unsatisfied
6512                         # deps are irrelevant since we only want to avoid breaking deps
6513                         # that are initially satisfied.
6514                         while self._unsatisfied_deps:
6515                                 dep = self._unsatisfied_deps.pop()
6516                                 matches = vardb.match_pkgs(dep.atom)
6517                                 if not matches:
6518                                         self._initially_unsatisfied_deps.append(dep)
6519                                         continue
6520                                 # A scheduled installation broke a deep dependency.
6521                                 # Add the installed package to the graph so that it
6522                                 # will be appropriately reported as a slot collision
6523                                 # (possibly solvable via backtracking).
6524                                 pkg = matches[-1] # highest match
6525                                 if not self._add_pkg(pkg, dep):
6526                                         return 0
6527                                 if not self._create_graph(allow_unsatisfied=True):
6528                                         return 0
6529                 return 1
6530
6531         def _pkg(self, cpv, type_name, root_config, installed=False):
6532                 """
6533                 Get a package instance from the cache, or create a new
6534                 one if necessary. Raises PackageNotFound if aux_get raises
6535                 KeyError for some reason (the package does not exist or is
6536                 corrupt).
6537                 """
6538                 operation = "merge"
6539                 if installed:
6540                         operation = "nomerge"
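                     # Note: the lookup key here is a (type_name, root, cpv, operation)
                     # tuple while the entry stored below is keyed by the Package
                     # instance itself; this works because Package instances apparently
                     # hash and compare by that same tuple.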
6541                 pkg = self._pkg_cache.get(
6542                         (type_name, root_config.root, cpv, operation))
6543                 if pkg is None:
6544                         tree_type = self.pkg_tree_map[type_name]
6545                         db = root_config.trees[tree_type].dbapi
6546                         db_keys = list(self._trees_orig[root_config.root][
6547                                 tree_type].dbapi._aux_cache_keys)
6548                         try:
6549                                 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
6550                         except KeyError:
6551                                 raise portage.exception.PackageNotFound(cpv)
6552                         pkg = Package(cpv=cpv, metadata=metadata,
6553                                 root_config=root_config, installed=installed)
6554                         if type_name == "ebuild":
6555                                 settings = self.pkgsettings[root_config.root]
6556                                 settings.setcpv(pkg)
6557                                 pkg.metadata["USE"] = settings["PORTAGE_USE"]
6558                                 pkg.metadata['CHOST'] = settings.get('CHOST', '')
6559                         self._pkg_cache[pkg] = pkg
6560                 return pkg
6561
6562         def validate_blockers(self):
6563                 """Remove any blockers from the digraph that do not match any of the
6564                 packages within the graph.  If necessary, create hard deps to ensure
6565                 correct merge order such that mutually blocking packages are never
6566                 installed simultaneously."""
6567
6568                 if "--buildpkgonly" in self.myopts or \
6569                         "--nodeps" in self.myopts:
6570                         return True
6571
6572                 #if "deep" in self.myparams:
6573                 if True:
6574                         # Pull in blockers from all installed packages that haven't already
6575                         # been pulled into the depgraph.  This is not enabled by default
6576                         # due to the performance penalty that is incurred by all the
6577                         # additional dep_check calls that are required.
6578
6579                         dep_keys = ["DEPEND","RDEPEND","PDEPEND"]
6580                         for myroot in self.trees:
6581                                 vardb = self.trees[myroot]["vartree"].dbapi
6582                                 portdb = self.trees[myroot]["porttree"].dbapi
6583                                 pkgsettings = self.pkgsettings[myroot]
6584                                 final_db = self.mydbapi[myroot]
6585
6586                                 blocker_cache = BlockerCache(myroot, vardb)
6587                                 stale_cache = set(blocker_cache)
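                                     # stale_cache starts out holding every cached cpv; entries
                                     # still present after the vardb loop no longer correspond to
                                     # an installed package and are pruned before the cache is
                                     # flushed below.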
6588                                 for pkg in vardb:
6589                                         cpv = pkg.cpv
6590                                         stale_cache.discard(cpv)
6591                                         pkg_in_graph = self.digraph.contains(pkg)
6592
6593                                         # Check for masked installed packages. Only warn about
6594                                         # packages that are in the graph in order to avoid warning
6595                                         # about those that will be automatically uninstalled during
6596                                         # the merge process or by --depclean.
6597                                         if pkg in final_db:
6598                                                 if pkg_in_graph and not visible(pkgsettings, pkg):
6599                                                         self._masked_installed.add(pkg)
6600
6601                                         blocker_atoms = None
6602                                         blockers = None
6603                                         if pkg_in_graph:
6604                                                 blockers = []
6605                                                 try:
6606                                                         blockers.extend(
6607                                                                 self._blocker_parents.child_nodes(pkg))
6608                                                 except KeyError:
6609                                                         pass
6610                                                 try:
6611                                                         blockers.extend(
6612                                                                 self._irrelevant_blockers.child_nodes(pkg))
6613                                                 except KeyError:
6614                                                         pass
6615                                         if blockers is not None:
6616                                                 blockers = set(str(blocker.atom) \
6617                                                         for blocker in blockers)
6618
6619                                         # If this node has any blockers, create a "nomerge"
6620                                         # node for it so that they can be enforced.
6621                                         self.spinner.update()
6622                                         blocker_data = blocker_cache.get(cpv)
6623                                         if blocker_data is not None and \
6624                                                 blocker_data.counter != long(pkg.metadata["COUNTER"]):
6625                                                 blocker_data = None
6626
6627                                         # If blocker data from the graph is available, use
6628                                         # it to validate the cache and update the cache if
6629                                         # it seems invalid.
6630                                         if blocker_data is not None and \
6631                                                 blockers is not None:
6632                                                 if not blockers.symmetric_difference(
6633                                                         blocker_data.atoms):
6634                                                         continue
6635                                                 blocker_data = None
6636
6637                                         if blocker_data is None and \
6638                                                 blockers is not None:
6639                                                 # Re-use the blockers from the graph.
6640                                                 blocker_atoms = sorted(blockers)
6641                                                 counter = long(pkg.metadata["COUNTER"])
6642                                                 blocker_data = \
6643                                                         blocker_cache.BlockerData(counter, blocker_atoms)
6644                                                 blocker_cache[pkg.cpv] = blocker_data
6645                                                 continue
6646
6647                                         if blocker_data:
6648                                                 blocker_atoms = blocker_data.atoms
6649                                         else:
6650                                                 # Use aux_get() to trigger FakeVartree global
6651                                                 # updates on *DEPEND when appropriate.
6652                                                 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
6653                                                 # It is crucial to pass in final_db here in order to
6654                                                 # optimize dep_check calls by eliminating atoms via
6655                                                 # dep_wordreduce and dep_eval calls.
6656                                                 try:
6657                                                         portage.dep._dep_check_strict = False
6658                                                         try:
6659                                                                 success, atoms = portage.dep_check(depstr,
6660                                                                         final_db, pkgsettings, myuse=pkg.use.enabled,
6661                                                                         trees=self._graph_trees, myroot=myroot)
6662                                                         except Exception, e:
6663                                                                 if isinstance(e, SystemExit):
6664                                                                         raise
6665                                                                 # This is helpful, for example, if a ValueError
6666                                                                 # is thrown from cpv_expand due to multiple
6667                                                                 # matches (this can happen if an atom lacks a
6668                                                                 # category).
6669                                                                 show_invalid_depstring_notice(
6670                                                                         pkg, depstr, str(e))
6671                                                                 del e
6672                                                                 raise
6673                                                 finally:
6674                                                         portage.dep._dep_check_strict = True
6675                                                 if not success:
6676                                                         replacement_pkg = final_db.match_pkgs(pkg.slot_atom)
6677                                                         if replacement_pkg and \
6678                                                                 replacement_pkg[0].operation == "merge":
6679                                                                 # This package is being replaced anyway, so
6680                                                                 # ignore invalid dependencies so as not to
6681                                                                 # annoy the user too much (otherwise they'd be
6682                                                                 # forced to manually unmerge it first).
6683                                                                 continue
6684                                                         show_invalid_depstring_notice(pkg, depstr, atoms)
6685                                                         return False
6686                                                 blocker_atoms = [myatom for myatom in atoms \
6687                                                         if myatom.startswith("!")]
6688                                                 blocker_atoms.sort()
6689                                                 counter = long(pkg.metadata["COUNTER"])
6690                                                 blocker_cache[cpv] = \
6691                                                         blocker_cache.BlockerData(counter, blocker_atoms)
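                                             # (Illustration with made-up values: a package whose vdb
                                             # COUNTER is 42 and whose *DEPEND contains "!app-foo/bar"
                                             # is cached as BlockerData(42, ["!app-foo/bar"]).)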
6692                                         if blocker_atoms:
6693                                                 try:
6694                                                         for atom in blocker_atoms:
6695                                                                 blocker = Blocker(atom=portage.dep.Atom(atom),
6696                                                                         eapi=pkg.metadata["EAPI"], root=myroot)
6697                                                                 self._blocker_parents.add(blocker, pkg)
6698                                                 except portage.exception.InvalidAtom, e:
6699                                                         depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
6700                                                         show_invalid_depstring_notice(
6701                                                                 pkg, depstr, "Invalid Atom: %s" % (e,))
6702                                                         return False
6703                                 for cpv in stale_cache:
6704                                         del blocker_cache[cpv]
6705                                 blocker_cache.flush()
6706                                 del blocker_cache
6707
6708                 # Discard any "uninstall" tasks scheduled by previous calls
6709                 # to this method, since those tasks may not make sense given
6710                 # the current graph state.
6711                 previous_uninstall_tasks = self._blocker_uninstalls.leaf_nodes()
6712                 if previous_uninstall_tasks:
6713                         self._blocker_uninstalls = digraph()
6714                         self.digraph.difference_update(previous_uninstall_tasks)
6715
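                     # Blocker instances are always leaf nodes of _blocker_parents,
                     # since each one was added above as a child of the package whose
                     # *DEPEND declares it.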
6716                 for blocker in self._blocker_parents.leaf_nodes():
6717                         self.spinner.update()
6718                         root_config = self.roots[blocker.root]
6719                         virtuals = root_config.settings.getvirtuals()
6720                         myroot = blocker.root
6721                         initial_db = self.trees[myroot]["vartree"].dbapi
6722                         final_db = self.mydbapi[myroot]
6723                         
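                             # An old-style virtual blocker (no new-style provider in the
                             # graph) has to be expanded into one atom per PROVIDE-er,
                             # since the block really applies to the providing packages
                             # rather than the virtual cp itself.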
6724                         provider_virtual = False
6725                         if blocker.cp in virtuals and \
6726                                 not self._have_new_virt(blocker.root, blocker.cp):
6727                                 provider_virtual = True
6728
6729                         if provider_virtual:
6730                                 atoms = []
6731                                 for provider_entry in virtuals[blocker.cp]:
6732                                         provider_cp = \
6733                                                 portage.dep_getkey(provider_entry)
6734                                         atoms.append(blocker.atom.replace(
6735                                                 blocker.cp, provider_cp))
6736                         else:
6737                                 atoms = [blocker.atom]
6738
6739                         blocked_initial = []
6740                         for atom in atoms:
6741                                 blocked_initial.extend(initial_db.match_pkgs(atom))
6742
6743                         blocked_final = []
6744                         for atom in atoms:
6745                                 blocked_final.extend(final_db.match_pkgs(atom))
6746
6747                         if not blocked_initial and not blocked_final:
6748                                 parent_pkgs = self._blocker_parents.parent_nodes(blocker)
6749                                 self._blocker_parents.remove(blocker)
6750                                 # Discard any parents that don't have any more blockers.
6751                                 for pkg in parent_pkgs:
6752                                         self._irrelevant_blockers.add(blocker, pkg)
6753                                         if not self._blocker_parents.child_nodes(pkg):
6754                                                 self._blocker_parents.remove(pkg)
6755                                 continue
6756                         for parent in self._blocker_parents.parent_nodes(blocker):
6757                                 unresolved_blocks = False
6758                                 depends_on_order = set()
6759                                 for pkg in blocked_initial:
6760                                         if pkg.slot_atom == parent.slot_atom:
6761                                                 # TODO: Support blocks within slots in cases where it
6762                                                 # might make sense.  For example, a new version might
6763                                                 # require that the old version be uninstalled at build
6764                                                 # time.
6765                                                 continue
6766                                         if parent.installed:
6767                                                 # Two currently installed packages conflict with
6768                                                 # each other. Ignore this case since the damage
6769                                                 # is already done and this would be likely to
6770                                                 # confuse users if displayed like a normal blocker.
6771                                                 continue
6772
6773                                         self._blocked_pkgs.add(pkg, blocker)
6774
6775                                         if parent.operation == "merge":
6776                                                 # Maybe the blocked package can be replaced or simply
6777                                                 # unmerged to resolve this block.
6778                                                 depends_on_order.add((pkg, parent))
6779                                                 continue
6780                                         # None of the above blocker resolution techniques apply,
6781                                         # so apparently this one is unresolvable.
6782                                         unresolved_blocks = True
6783                                 for pkg in blocked_final:
6784                                         if pkg.slot_atom == parent.slot_atom:
6785                                                 # TODO: Support blocks within slots.
6786                                                 continue
6787                                         if parent.operation == "nomerge" and \
6788                                                 pkg.operation == "nomerge":
6789                                                 # This blocker will be handled the next time that a
6790                                                 # merge of either package is triggered.
6791                                                 continue
6792
6793                                         self._blocked_pkgs.add(pkg, blocker)
6794
6795                                         # Maybe the blocking package can be
6796                                         # unmerged to resolve this block.
6797                                         if parent.operation == "merge" and pkg.installed:
6798                                                 depends_on_order.add((pkg, parent))
6799                                                 continue
6800                                         elif parent.operation == "nomerge":
6801                                                 depends_on_order.add((parent, pkg))
6802                                                 continue
6803                                         # None of the above blocker resolution techniques apply,
6804                                         # so apparently this one is unresolvable.
6805                                         unresolved_blocks = True
6806
6807                                 # Make sure we don't unmerge any packages that have been pulled
6808                                 # into the graph.
6809                                 if not unresolved_blocks and depends_on_order:
6810                                         for inst_pkg, inst_task in depends_on_order:
6811                                                 if self.digraph.contains(inst_pkg) and \
6812                                                         self.digraph.parent_nodes(inst_pkg):
6813                                                         unresolved_blocks = True
6814                                                         break
6815
6816                                 if not unresolved_blocks and depends_on_order:
6817                                         for inst_pkg, inst_task in depends_on_order:
6818                                                 uninst_task = Package(built=inst_pkg.built,
6819                                                         cpv=inst_pkg.cpv, installed=inst_pkg.installed,
6820                                                         metadata=inst_pkg.metadata,
6821                                                         operation="uninstall",
6822                                                         root_config=inst_pkg.root_config,
6823                                                         type_name=inst_pkg.type_name)
6824                                                 self._pkg_cache[uninst_task] = uninst_task
6825                                                 # Enforce correct merge order with a hard dep.
6826                                                 self.digraph.addnode(uninst_task, inst_task,
6827                                                         priority=BlockerDepPriority.instance)
6828                                                 # Count references to this blocker so that it can be
6829                                                 # invalidated after nodes referencing it have been
6830                                                 # merged.
6831                                                 self._blocker_uninstalls.addnode(uninst_task, blocker)
6832                                 if not unresolved_blocks and not depends_on_order:
6833                                         self._irrelevant_blockers.add(blocker, parent)
6834                                         self._blocker_parents.remove_edge(blocker, parent)
6835                                         if not self._blocker_parents.parent_nodes(blocker):
6836                                                 self._blocker_parents.remove(blocker)
6837                                         if not self._blocker_parents.child_nodes(parent):
6838                                                 self._blocker_parents.remove(parent)
6839                                 if unresolved_blocks:
6840                                         self._unsolvable_blockers.add(blocker, parent)
6841
6842                 return True
6843
6844         def _accept_blocker_conflicts(self):
6845                 acceptable = False
6846                 for x in ("--buildpkgonly", "--fetchonly",
6847                         "--fetch-all-uri", "--nodeps"):
6848                         if x in self.myopts:
6849                                 acceptable = True
6850                                 break
6851                 return acceptable
6852
6853         def _merge_order_bias(self, mygraph):
6854                 """
6855                 For optimal leaf node selection, promote deep system runtime deps and
6856                 order nodes from highest to lowest overall reference count.
6857                 """
6858
6859                 node_info = {}
6860                 for node in mygraph.order:
6861                         node_info[node] = len(mygraph.parent_nodes(node))
6862                 deep_system_deps = _find_deep_system_runtime_deps(mygraph)
6863
6864                 def cmp_merge_preference(node1, node2):
6865
6866                         if node1.operation == 'uninstall':
6867                                 if node2.operation == 'uninstall':
6868                                         return 0
6869                                 return 1
6870
6871                         if node2.operation == 'uninstall':
6872                                 if node1.operation == 'uninstall':
6873                                         return 0
6874                                 return -1
6875
6876                         node1_sys = node1 in deep_system_deps
6877                         node2_sys = node2 in deep_system_deps
6878                         if node1_sys != node2_sys:
6879                                 if node1_sys:
6880                                         return -1
6881                                 return 1
6882
6883                         return node_info[node2] - node_info[node1]
6884
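                     # cmp_sort_key() adapts the cmp-style function above into a sort
                     # key so the whole merge list can be ordered in a single pass.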
6885                 mygraph.order.sort(key=cmp_sort_key(cmp_merge_preference))
6886
6887         def altlist(self, reversed=False):
6888
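                     # _serialize_tasks() raises _serialize_tasks_retry when the graph
                     # state changed while ordering tasks; conflicts are then resolved
                     # again and serialization is retried until it yields a task list.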
6889                 while self._serialized_tasks_cache is None:
6890                         self._resolve_conflicts()
6891                         try:
6892                                 self._serialized_tasks_cache, self._scheduler_graph = \
6893                                         self._serialize_tasks()
6894                         except self._serialize_tasks_retry:
6895                                 pass
6896
6897                 retlist = self._serialized_tasks_cache[:]
6898                 if reversed:
6899                         retlist.reverse()
6900                 return retlist
6901
6902         def schedulerGraph(self):
6903                 """
6904                 The scheduler graph is identical to the normal one except that
6905                 uninstall edges are reversed in specific cases that require
6906                 conflicting packages to be temporarily installed simultaneously.
6907                 This is intended for use by the Scheduler in its parallelization
6908                 logic. It ensures that temporary simultaneous installation of
6909                 conflicting packages is avoided when appropriate (especially for
6910                 !!atom blockers), but allowed in specific cases that require it.
6911
6912                 Note that this method calls break_refs() which alters the state of
6913                 internal Package instances such that this depgraph instance should
6914                 not be used to perform any more calculations.
6915                 """
6916                 if self._scheduler_graph is None:
6917                         self.altlist()
6918                 self.break_refs(self._scheduler_graph.order)
6919                 return self._scheduler_graph
6920
6921         def break_refs(self, nodes):
6922                 """
6923                 Take a mergelist like that returned from self.altlist() and
6924                 break any references that lead back to the depgraph. This is
6925                 useful if you want to hold references to packages without
6926                 also holding the depgraph on the heap.
6927                 """
6928                 for node in nodes:
6929                         if hasattr(node, "root_config"):
6930                                 # The FakeVartree references the _package_cache which
6931                                 # references the depgraph. So that Package instances don't
6932                                 # hold the depgraph and FakeVartree on the heap, replace
6933                                 # the RootConfig that references the FakeVartree with the
6934                                 # original RootConfig instance which references the actual
6935                                 # vartree.
6936                                 node.root_config = \
6937                                         self._trees_orig[node.root_config.root]["root_config"]
6938
6939         def _resolve_conflicts(self):
6940                 if not self._complete_graph():
6941                         raise self._unknown_internal_error()
6942
6943                 if not self.validate_blockers():
6944                         raise self._unknown_internal_error()
6945
6946                 if self._slot_collision_info:
6947                         self._process_slot_conflicts()
6948
6949         def _serialize_tasks(self):
6950
6951                 if "--debug" in self.myopts:
6952                         writemsg("\ndigraph:\n\n", noiselevel=-1)
6953                         self.digraph.debug_print()
6954                         writemsg("\n", noiselevel=-1)
6955
6956                 scheduler_graph = self.digraph.copy()
6957                 mygraph=self.digraph.copy()
6958                 # Prune "nomerge" root nodes if nothing depends on them, since
6959                 # otherwise they slow down merge order calculation. Don't remove
6960                 # non-root nodes since they help optimize merge order in some cases
6961                 # such as revdep-rebuild.
6962                 removed_nodes = set()
6963                 while True:
6964                         for node in mygraph.root_nodes():
6965                                 if not isinstance(node, Package) or \
6966                                         node.installed or node.onlydeps:
6967                                         removed_nodes.add(node)
6968                         if removed_nodes:
6969                                 self.spinner.update()
6970                                 mygraph.difference_update(removed_nodes)
6971                         if not removed_nodes:
6972                                 break
6973                         removed_nodes.clear()
6974                 self._merge_order_bias(mygraph)
6975                 def cmp_circular_bias(n1, n2):
6976                         """
6977                         RDEPEND is stronger than PDEPEND and this function
6978                         measures such a strength bias within a circular
6979                         dependency relationship.
6980                         """
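                             # The node whose stronger-than-PDEPEND edge points at the
                             # other node sorts later, so its runtime dependency ends up
                             # earlier in the merge list.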
6981                         n1_n2_medium = n2 in mygraph.child_nodes(n1,
6982                                 ignore_priority=priority_range.ignore_medium_soft)
6983                         n2_n1_medium = n1 in mygraph.child_nodes(n2,
6984                                 ignore_priority=priority_range.ignore_medium_soft)
6985                         if n1_n2_medium == n2_n1_medium:
6986                                 return 0
6987                         elif n1_n2_medium:
6988                                 return 1
6989                         return -1
6990                 myblocker_uninstalls = self._blocker_uninstalls.copy()
6991                 retlist=[]
6992                 # Contains uninstall tasks that have been scheduled to
6993                 # occur after overlapping blockers have been installed.
6994                 scheduled_uninstalls = set()
6995                 # Contains any Uninstall tasks that have been ignored
6996                 # in order to avoid the circular deps code path. These
6997                 # correspond to blocker conflicts that could not be
6998                 # resolved.
6999                 ignored_uninstall_tasks = set()
7000                 have_uninstall_task = False
7001                 complete = "complete" in self.myparams
7002                 asap_nodes = []
7003
7004                 def get_nodes(**kwargs):
7005                         """
7006                         Returns leaf nodes excluding Uninstall instances
7007                         since those should be executed as late as possible.
7008                         """
7009                         return [node for node in mygraph.leaf_nodes(**kwargs) \
7010                                 if isinstance(node, Package) and \
7011                                         (node.operation != "uninstall" or \
7012                                         node in scheduled_uninstalls)]
7013
7014                 # sys-apps/portage needs special treatment if ROOT="/"
7015                 running_root = self._running_root.root
7016                 from portage.const import PORTAGE_PACKAGE_ATOM
7017                 runtime_deps = InternalPackageSet(
7018                         initial_atoms=[PORTAGE_PACKAGE_ATOM])
7019                 running_portage = self.trees[running_root]["vartree"].dbapi.match_pkgs(
7020                         PORTAGE_PACKAGE_ATOM)
7021                 replacement_portage = self.mydbapi[running_root].match_pkgs(
7022                         PORTAGE_PACKAGE_ATOM)
7023
7024                 if running_portage:
7025                         running_portage = running_portage[0]
7026                 else:
7027                         running_portage = None
7028
7029                 if replacement_portage:
7030                         replacement_portage = replacement_portage[0]
7031                 else:
7032                         replacement_portage = None
7033
7034                 if replacement_portage == running_portage:
7035                         replacement_portage = None
7036
7037                 if replacement_portage is not None:
7038                         # update from running_portage to replacement_portage asap
7039                         asap_nodes.append(replacement_portage)
7040
7041                 if running_portage is not None:
7042                         try:
7043                                 portage_rdepend = self._select_atoms_highest_available(
7044                                         running_root, running_portage.metadata["RDEPEND"],
7045                                         myuse=running_portage.use.enabled,
7046                                         parent=running_portage, strict=False)
7047                         except portage.exception.InvalidDependString, e:
7048                                 portage.writemsg("!!! Invalid RDEPEND in " + \
7049                                         "'%svar/db/pkg/%s/RDEPEND': %s\n" % \
7050                                         (running_root, running_portage.cpv, e), noiselevel=-1)
7051                                 del e
7052                                 portage_rdepend = []
7053                         runtime_deps.update(atom for atom in portage_rdepend \
7054                                 if not atom.startswith("!"))
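                     # runtime_deps now holds the portage package atom itself plus the
                     # running portage instance's non-blocker RDEPEND atoms; these are
                     # checked further below so that packages the running portage needs
                     # are not uninstalled to resolve a block.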
7055
7056                 def gather_deps(ignore_priority, mergeable_nodes,
7057                         selected_nodes, node):
7058                         """
7059                         Recursively gather a group of nodes that RDEPEND on
7060                         each other. This ensures that they are merged as a group
7061                         and get their RDEPENDs satisfied as soon as possible.
7062                         """
7063                         if node in selected_nodes:
7064                                 return True
7065                         if node not in mergeable_nodes:
7066                                 return False
7067                         if node == replacement_portage and \
7068                                 mygraph.child_nodes(node,
7069                                 ignore_priority=priority_range.ignore_medium_soft):
7070                                 # Make sure that portage always has all of its
7071                                 # RDEPENDs installed first.
7072                                 return False
7073                         selected_nodes.add(node)
7074                         for child in mygraph.child_nodes(node,
7075                                 ignore_priority=ignore_priority):
7076                                 if not gather_deps(ignore_priority,
7077                                         mergeable_nodes, selected_nodes, child):
7078                                         return False
7079                         return True
7080
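                     # These helpers additionally treat the BlockerDepPriority edges
                     # added by validate_blockers() (the hard ordering deps between
                     # uninstall tasks and the merges involved in a block) as ignorable
                     # while collecting mergeable nodes.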
7081                 def ignore_uninst_or_med(priority):
7082                         if priority is BlockerDepPriority.instance:
7083                                 return True
7084                         return priority_range.ignore_medium(priority)
7085
7086                 def ignore_uninst_or_med_soft(priority):
7087                         if priority is BlockerDepPriority.instance:
7088                                 return True
7089                         return priority_range.ignore_medium_soft(priority)
7090
7091                 tree_mode = "--tree" in self.myopts
7092                 # Tracks whether or not the current iteration should prefer asap_nodes
7093                 # if available.  This is set to False when the previous iteration
7094                 # failed to select any nodes.  It is reset whenever nodes are
7095                 # successfully selected.
7096                 prefer_asap = True
7097
7098                 # Controls whether or not the current iteration should drop edges that
7099                 # are "satisfied" by installed packages, in order to solve circular
7100                 # dependencies. The deep runtime dependencies of installed packages are
7101                 # not checked in this case (bug #199856), so it must be avoided
7102                 # whenever possible.
7103                 drop_satisfied = False
7104
7105                 # State of variables for successive iterations that loosen the
7106                 # criteria for node selection.
7107                 #
7108                 # iteration   prefer_asap   drop_satisfied
7109                 # 1           True          False
7110                 # 2           False         False
7111                 # 3           False         True
7112                 #
7113                 # If no nodes are selected on the last iteration, it is due to
7114                 # unresolved blockers or circular dependencies.
7115
7116                 while not mygraph.empty():
7117                         self.spinner.update()
7118                         selected_nodes = None
7119                         ignore_priority = None
7120                         if drop_satisfied or (prefer_asap and asap_nodes):
7121                                 priority_range = DepPrioritySatisfiedRange
7122                         else:
7123                                 priority_range = DepPriorityNormalRange
7124                         if prefer_asap and asap_nodes:
7125                                 # ASAP nodes are merged before their soft deps. Go ahead and
7126                                 # select root nodes here if necessary, since it's typical for
7127                                 # the parent to have been removed from the graph already.
7128                                 asap_nodes = [node for node in asap_nodes \
7129                                         if mygraph.contains(node)]
7130                                 for node in asap_nodes:
7131                                         if not mygraph.child_nodes(node,
7132                                                 ignore_priority=priority_range.ignore_soft):
7133                                                 selected_nodes = [node]
7134                                                 asap_nodes.remove(node)
7135                                                 break
7136                         if not selected_nodes and \
7137                                 not (prefer_asap and asap_nodes):
7138                                 for i in xrange(priority_range.NONE,
7139                                         priority_range.MEDIUM_SOFT + 1):
7140                                         ignore_priority = priority_range.ignore_priority[i]
7141                                         nodes = get_nodes(ignore_priority=ignore_priority)
7142                                         if nodes:
7143                                                 # If there is a mix of uninstall nodes with other
7144                                                 # types, save the uninstall nodes for later since
7145                                                 # sometimes a merge node will render an uninstall
7146                                                 # node unnecessary (due to occupying the same slot),
7147                                                 # and we want to avoid executing a separate uninstall
7148                                                 # task in that case.
7149                                                 if len(nodes) > 1:
7150                                                         good_uninstalls = []
7151                                                         with_some_uninstalls_excluded = []
7152                                                         for node in nodes:
7153                                                                 if node.operation == "uninstall":
7154                                                                         slot_node = self.mydbapi[node.root
7155                                                                                 ].match_pkgs(node.slot_atom)
7156                                                                         if slot_node and \
7157                                                                                 slot_node[0].operation == "merge":
7158                                                                                 continue
7159                                                                         good_uninstalls.append(node)
7160                                                                 with_some_uninstalls_excluded.append(node)
7161                                                         if good_uninstalls:
7162                                                                 nodes = good_uninstalls
7163                                                         elif with_some_uninstalls_excluded:
7164                                                                 nodes = with_some_uninstalls_excluded
7165                                                         else:
7166                                                                 nodes = nodes
7167
7168                                                 if ignore_priority is None and not tree_mode:
7169                                                         # Greedily pop all of these nodes since no
7170                                                         # relationship has been ignored. This optimization
7171                                                         # destroys --tree output, so it's disabled in tree
7172                                                         # mode.
7173                                                         selected_nodes = nodes
7174                                                 else:
7175                                                         # For optimal merge order:
7176                                                         #  * Only pop one node.
7177                                                         #  * Removing a root node (node without a parent)
7178                                                         #    will not produce a leaf node, so avoid it.
7179                                                         #  * It's normal for a selected uninstall to be a
7180                                                         #    root node, so don't check them for parents.
7181                                                         for node in nodes:
7182                                                                 if node.operation == "uninstall" or \
7183                                                                         mygraph.parent_nodes(node):
7184                                                                         selected_nodes = [node]
7185                                                                         break
7186
7187                                                 if selected_nodes:
7188                                                         break
7189
7190                         if not selected_nodes:
7191                                 nodes = get_nodes(ignore_priority=priority_range.ignore_medium)
7192                                 if nodes:
7193                                         mergeable_nodes = set(nodes)
7194                                         if prefer_asap and asap_nodes:
7195                                                 nodes = asap_nodes
7196                                         for i in xrange(priority_range.SOFT,
7197                                                 priority_range.MEDIUM_SOFT + 1):
7198                                                 ignore_priority = priority_range.ignore_priority[i]
7199                                                 for node in nodes:
7200                                                         if not mygraph.parent_nodes(node):
7201                                                                 continue
7202                                                         selected_nodes = set()
7203                                                         if gather_deps(ignore_priority,
7204                                                                 mergeable_nodes, selected_nodes, node):
7205                                                                 break
7206                                                         else:
7207                                                                 selected_nodes = None
7208                                                 if selected_nodes:
7209                                                         break
7210
7211                                         if prefer_asap and asap_nodes and not selected_nodes:
7212                                                 # We failed to find any asap nodes to merge, so ignore
7213                                                 # them for the next iteration.
7214                                                 prefer_asap = False
7215                                                 continue
7216
7217                         if selected_nodes and ignore_priority is not None:
7218                                 # Try to merge ignored medium_soft deps as soon as possible
7219                                 # if they're not satisfied by installed packages.
7220                                 for node in selected_nodes:
7221                                         children = set(mygraph.child_nodes(node))
7222                                         soft = children.difference(
7223                                                 mygraph.child_nodes(node,
7224                                                 ignore_priority=DepPrioritySatisfiedRange.ignore_soft))
7225                                         medium_soft = children.difference(
7226                                                 mygraph.child_nodes(node,
7227                                                         ignore_priority = \
7228                                                         DepPrioritySatisfiedRange.ignore_medium_soft))
7229                                         medium_soft.difference_update(soft)
7230                                         for child in medium_soft:
7231                                                 if child in selected_nodes:
7232                                                         continue
7233                                                 if child in asap_nodes:
7234                                                         continue
7235                                                 asap_nodes.append(child)
7236
7237                         if selected_nodes and len(selected_nodes) > 1:
7238                                 if not isinstance(selected_nodes, list):
7239                                         selected_nodes = list(selected_nodes)
7240                                 selected_nodes.sort(key=cmp_sort_key(cmp_circular_bias))
7241
7242                         if not selected_nodes and not myblocker_uninstalls.is_empty():
7243                                 # An Uninstall task needs to be executed in order to
7244                                 # avoid conflict if possible.
7245
7246                                 if drop_satisfied:
7247                                         priority_range = DepPrioritySatisfiedRange
7248                                 else:
7249                                         priority_range = DepPriorityNormalRange
7250
7251                                 mergeable_nodes = get_nodes(
7252                                         ignore_priority=ignore_uninst_or_med)
7253
7254                                 min_parent_deps = None
7255                                 uninst_task = None
7256                                 for task in myblocker_uninstalls.leaf_nodes():
7257                                         # Do some sanity checks so that system or world packages
7258                                         # don't get uninstalled inappropriately here (only really
7259                                         # necessary when --complete-graph has not been enabled).
7260
7261                                         if task in ignored_uninstall_tasks:
7262                                                 continue
7263
7264                                         if task in scheduled_uninstalls:
7265                                                 # It's been scheduled but it hasn't
7266                                                 # been executed yet due to dependence
7267                                                 # on installation of blocking packages.
7268                                                 continue
7269
7270                                         root_config = self.roots[task.root]
7271                                         inst_pkg = self._pkg_cache[
7272                                                 ("installed", task.root, task.cpv, "nomerge")]
7273
7274                                         if self.digraph.contains(inst_pkg):
7275                                                 continue
7276
7277                                         forbid_overlap = False
7278                                         heuristic_overlap = False
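                                             # EAPI 0/1 blockers predate the "!!" syntax, so whether
                                             # temporary overlap must be forbidden cannot be expressed
                                             # there and is handled heuristically for the running root
                                             # below.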
7279                                         for blocker in myblocker_uninstalls.parent_nodes(task):
7280                                                 if blocker.eapi in ("0", "1"):
7281                                                         heuristic_overlap = True
7282                                                 elif blocker.atom.blocker.overlap.forbid:
7283                                                         forbid_overlap = True
7284                                                         break
7285                                         if forbid_overlap and running_root == task.root:
7286                                                 continue
7287
7288                                         if heuristic_overlap and running_root == task.root:
7289                                                 # Never uninstall sys-apps/portage or its essential
7290                                                 # dependencies, except through replacement.
7291                                                 try:
7292                                                         runtime_dep_atoms = \
7293                                                                 list(runtime_deps.iterAtomsForPackage(task))
7294                                                 except portage.exception.InvalidDependString, e:
7295                                                         portage.writemsg("!!! Invalid PROVIDE in " + \
7296                                                                 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
7297                                                                 (task.root, task.cpv, e), noiselevel=-1)
7298                                                         del e
7299                                                         continue
7300
7301                                                 # Don't uninstall a runtime dep if it appears
7302                                                 # to be the only suitable one installed.
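                                                     # Illustrative case: if the only installed match for one of
                                                     # these runtime dep atoms is the package being uninstalled
                                                     # itself (same cpv and COUNTER), removing it could break the
                                                     # running system, so the task is skipped below.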
7303                                                 skip = False
7304                                                 vardb = root_config.trees["vartree"].dbapi
7305                                                 for atom in runtime_dep_atoms:
7306                                                         other_version = None
7307                                                         for pkg in vardb.match_pkgs(atom):
7308                                                                 if pkg.cpv == task.cpv and \
7309                                                                         pkg.metadata["COUNTER"] == \
7310                                                                         task.metadata["COUNTER"]:
7311                                                                         continue
7312                                                                 other_version = pkg
7313                                                                 break
7314                                                         if other_version is None:
7315                                                                 skip = True
7316                                                                 break
7317                                                 if skip:
7318                                                         continue
7319
7320                                                 # For packages in the system set, don't take
7321                                                 # any chances. If the conflict can't be resolved
7322                                                 # by a normal replacement operation then abort.
7323                                                 skip = False
7324                                                 try:
7325                                                         for atom in root_config.sets[
7326                                                                 "system"].iterAtomsForPackage(task):
7327                                                                 skip = True
7328                                                                 break
7329                                                 except portage.exception.InvalidDependString, e:
7330                                                         portage.writemsg("!!! Invalid PROVIDE in " + \
7331                                                                 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
7332                                                                 (task.root, task.cpv, e), noiselevel=-1)
7333                                                         del e
7334                                                         skip = True
7335                                                 if skip:
7336                                                         continue
7337
7338                                         # Note that the world check isn't always
7339                                         # necessary since self._complete_graph() will
7340                                         # add all packages from the system and world sets to the
7341                                         # graph. This just allows unresolved conflicts to be
7342                                         # detected as early as possible, which makes it possible
7343                                         # to avoid calling self._complete_graph() when it is
7344                                         # unnecessary due to blockers triggering an abort.
7345                                         if not complete:
7346                                                 # For packages in the world set, go ahead and uninstall
7347                                                 # when necessary, as long as the atom will be satisfied
7348                                                 # in the final state.
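                                                     # Illustrative case (hypothetical): if a world atom is only
                                                     # satisfied by this installed package and nothing else in the
                                                     # graph satisfies it, the task is skipped and the blocked
                                                     # atom is recorded for later display.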
7349                                                 graph_db = self.mydbapi[task.root]
7350                                                 skip = False
7351                                                 try:
7352                                                         for atom in root_config.sets[
7353                                                                 "world"].iterAtomsForPackage(task):
7354                                                                 satisfied = False
7355                                                                 for pkg in graph_db.match_pkgs(atom):
7356                                                                         if pkg == inst_pkg:
7357                                                                                 continue
7358                                                                         satisfied = True
7359                                                                         break
7360                                                                 if not satisfied:
7361                                                                         skip = True
7362                                                                         self._blocked_world_pkgs[inst_pkg] = atom
7363                                                                         break
7364                                                 except portage.exception.InvalidDependString, e:
7365                                                         portage.writemsg("!!! Invalid PROVIDE in " + \
7366                                                                 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
7367                                                                 (task.root, task.cpv, e), noiselevel=-1)
7368                                                         del e
7369                                                         skip = True
7370                                                 if skip:
7371                                                         continue
7372
7373                                         # Check the deps of parent nodes to ensure that
7374                                         # the chosen task produces a leaf node. Maybe
7375                                         # this can be optimized some more to make the
7376                                         # best possible choice, but the current algorithm
7377                                         # is simple and should be near optimal for most
7378                                         # common cases.
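                                             # Selection heuristic (illustrative summary): among the
                                             # candidate uninstalls, the loop below keeps the task whose
                                             # parents have the fewest other outstanding child deps, since
                                             # those parents become mergeable soonest.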
7379                                         mergeable_parent = False
7380                                         parent_deps = set()
7381                                         for parent in mygraph.parent_nodes(task):
7382                                                 parent_deps.update(mygraph.child_nodes(parent,
7383                                                         ignore_priority=priority_range.ignore_medium_soft))
7384                                                 if parent in mergeable_nodes and \
7385                                                         gather_deps(ignore_uninst_or_med_soft,
7386                                                         mergeable_nodes, set(), parent):
7387                                                         mergeable_parent = True
7388
7389                                         if not mergeable_parent:
7390                                                 continue
7391
7392                                         parent_deps.remove(task)
7393                                         if min_parent_deps is None or \
7394                                                 len(parent_deps) < min_parent_deps:
7395                                                 min_parent_deps = len(parent_deps)
7396                                                 uninst_task = task
7397
7398                                 if uninst_task is not None:
7399                                         # The uninstall is performed only after blocking
7400                                         # packages have been merged on top of it. Files that
7401                                         # collide with the blocking packages are detected and
7402                                         # removed from the list of files to be uninstalled.
7403                                         scheduled_uninstalls.add(uninst_task)
7404                                         parent_nodes = mygraph.parent_nodes(uninst_task)
7405
7406                                         # Reverse the parent -> uninstall edges since we want
7407                                         # to do the uninstall after blocking packages have
7408                                         # been merged on top of it.
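                                             # Rewiring sketch (illustrative): before, uninst_task was a
                                             # dependency (child) of each blocked_pkg; after the loop
                                             # below, each blocked_pkg is a child of uninst_task, so the
                                             # blocking packages merge before the uninstall runs.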
7409                                         mygraph.remove(uninst_task)
7410                                         for blocked_pkg in parent_nodes:
7411                                                 mygraph.add(blocked_pkg, uninst_task,
7412                                                         priority=BlockerDepPriority.instance)
7413                                                 scheduler_graph.remove_edge(uninst_task, blocked_pkg)
7414                                                 scheduler_graph.add(blocked_pkg, uninst_task,
7415                                                         priority=BlockerDepPriority.instance)
7416
7417                                         # Reset the state variables for leaf node selection and
7418                                         # continue trying to select leaf nodes.
7419                                         prefer_asap = True
7420                                         drop_satisfied = False
7421                                         continue
7422
7423                         if not selected_nodes:
7424                                 # Only select root nodes as a last resort. This case should
7425                                 # only trigger when the graph is nearly empty and the only
7426                                 # remaining nodes are isolated (no parents or children). Since
7427                                 # the nodes must be isolated, ignore_priority is not needed.
7428                                 selected_nodes = get_nodes()
7429
7430                         if not selected_nodes and not drop_satisfied:
7431                                 drop_satisfied = True
7432                                 continue
7433
7434                         if not selected_nodes and not myblocker_uninstalls.is_empty():
7435                                 # If possible, drop an uninstall task here in order to avoid
7436                                 # the circular deps code path. The corresponding blocker will
7437                                 # still be counted as an unresolved conflict.
7438                                 uninst_task = None
7439                                 for node in myblocker_uninstalls.leaf_nodes():
7440                                         try:
7441                                                 mygraph.remove(node)
7442                                         except KeyError:
7443                                                 pass
7444                                         else:
7445                                                 uninst_task = node
7446                                                 ignored_uninstall_tasks.add(node)
7447                                                 break
7448
7449                                 if uninst_task is not None:
7450                                         # Reset the state variables for leaf node selection and
7451                                         # continue trying to select leaf nodes.
7452                                         prefer_asap = True
7453                                         drop_satisfied = False
7454                                         continue
7455
7456                         if not selected_nodes:
7457                                 self._circular_deps_for_display = mygraph
7458                                 raise self._unknown_internal_error()
7459
7460                         # At this point, we've succeeded in selecting one or more nodes, so
7461                         # reset state variables for leaf node selection.
7462                         prefer_asap = True
7463                         drop_satisfied = False
7464
7465                         mygraph.difference_update(selected_nodes)
7466
7467                         for node in selected_nodes:
7468                                 if isinstance(node, Package) and \
7469                                         node.operation == "nomerge":
7470                                         continue
7471
7472                                 # Handle interactions between blockers
7473                                 # and uninstallation tasks.
7474                                 solved_blockers = set()
7475                                 uninst_task = None
7476                                 if isinstance(node, Package) and \
7477                                         "uninstall" == node.operation:
7478                                         have_uninstall_task = True
7479                                         uninst_task = node
7480                                 else:
7481                                         vardb = self.trees[node.root]["vartree"].dbapi
7482                                         previous_cpv = vardb.match(node.slot_atom)
7483                                         if previous_cpv:
7484                                                 # The package will be replaced by this one, so remove
7485                                                 # the corresponding Uninstall task if necessary.
7486                                                 previous_cpv = previous_cpv[0]
7487                                                 uninst_task = \
7488                                                         ("installed", node.root, previous_cpv, "uninstall")
7489                                                 try:
7490                                                         mygraph.remove(uninst_task)
7491                                                 except KeyError:
7492                                                         pass
7493
7494                                 if uninst_task is not None and \
7495                                         uninst_task not in ignored_uninstall_tasks and \
7496                                         myblocker_uninstalls.contains(uninst_task):
7497                                         blocker_nodes = myblocker_uninstalls.parent_nodes(uninst_task)
7498                                         myblocker_uninstalls.remove(uninst_task)
7499                                         # Discard any blockers that this Uninstall solves.
7500                                         for blocker in blocker_nodes:
7501                                                 if not myblocker_uninstalls.child_nodes(blocker):
7502                                                         myblocker_uninstalls.remove(blocker)
7503                                                         solved_blockers.add(blocker)
7504
7505                                 retlist.append(node)
7506
7507                                 if (isinstance(node, Package) and \
7508                                         "uninstall" == node.operation) or \
7509                                         (uninst_task is not None and \
7510                                         uninst_task in scheduled_uninstalls):
7511                                         # Include satisfied blockers in the merge list
7512                                         # since the user might be interested and also
7513                                         # it serves as an indicator that blocking packages
7514                                         # will be temporarily installed simultaneously.
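                                             # These satisfied blockers appear in the displayed merge list
                                             # as lowercase "b" blocker entries (see the Blocker handling
                                             # in display() below).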
7515                                         for blocker in solved_blockers:
7516                                                 retlist.append(Blocker(atom=blocker.atom,
7517                                                         root=blocker.root, eapi=blocker.eapi,
7518                                                         satisfied=True))
7519
7520                 unsolvable_blockers = set(self._unsolvable_blockers.leaf_nodes())
7521                 for node in myblocker_uninstalls.root_nodes():
7522                         unsolvable_blockers.add(node)
7523
7524                 for blocker in unsolvable_blockers:
7525                         retlist.append(blocker)
7526
7527                 # If any Uninstall tasks need to be executed in order
7528                 # to avoid a conflict, complete the graph with any
7529                 # dependencies that may have been initially
7530                 # neglected (to ensure that unsafe Uninstall tasks
7531                 # are properly identified and blocked from execution).
7532                 if have_uninstall_task and \
7533                         not complete and \
7534                         not unsolvable_blockers:
7535                         self.myparams.add("complete")
7536                         raise self._serialize_tasks_retry("")
7537
7538                 if unsolvable_blockers and \
7539                         not self._accept_blocker_conflicts():
7540                         self._unsatisfied_blockers_for_display = unsolvable_blockers
7541                         self._serialized_tasks_cache = retlist[:]
7542                         self._scheduler_graph = scheduler_graph
7543                         raise self._unknown_internal_error()
7544
7545                 if self._slot_collision_info and \
7546                         not self._accept_blocker_conflicts():
7547                         self._serialized_tasks_cache = retlist[:]
7548                         self._scheduler_graph = scheduler_graph
7549                         raise self._unknown_internal_error()
7550
7551                 return retlist, scheduler_graph
7552
7553         def _show_circular_deps(self, mygraph):
7554                 # No leaf nodes are available, so we have a circular
7555                 # dependency panic situation.  Reduce the noise level to a
7556                 # minimum via repeated elimination of root nodes since they
7557                 # have no parents and thus cannot be part of a cycle.
7558                 while True:
7559                         root_nodes = mygraph.root_nodes(
7560                                 ignore_priority=DepPrioritySatisfiedRange.ignore_medium_soft)
7561                         if not root_nodes:
7562                                 break
7563                         mygraph.difference_update(root_nodes)
7564                 # Display the USE flags that are enabled on nodes that are part
7565                 # of dependency cycles in case that helps the user decide to
7566                 # disable some of them.
7567                 display_order = []
7568                 tempgraph = mygraph.copy()
7569                 while not tempgraph.empty():
7570                         nodes = tempgraph.leaf_nodes()
7571                         if not nodes:
7572                                 node = tempgraph.order[0]
7573                         else:
7574                                 node = nodes[0]
7575                         display_order.append(node)
7576                         tempgraph.remove(node)
7577                 display_order.reverse()
7578                 self.myopts.pop("--quiet", None)
7579                 self.myopts.pop("--verbose", None)
7580                 self.myopts["--tree"] = True
7581                 portage.writemsg("\n\n", noiselevel=-1)
7582                 self.display(display_order)
7583                 prefix = colorize("BAD", " * ")
7584                 portage.writemsg("\n", noiselevel=-1)
7585                 portage.writemsg(prefix + "Error: circular dependencies:\n",
7586                         noiselevel=-1)
7587                 portage.writemsg("\n", noiselevel=-1)
7588                 mygraph.debug_print()
7589                 portage.writemsg("\n", noiselevel=-1)
7590                 portage.writemsg(prefix + "Note that circular dependencies " + \
7591                         "can often be avoided by temporarily\n", noiselevel=-1)
7592                 portage.writemsg(prefix + "disabling USE flags that trigger " + \
7593                         "optional dependencies.\n", noiselevel=-1)
7594
7595         def _show_merge_list(self):
7596                 if self._serialized_tasks_cache is not None and \
7597                         not (self._displayed_list and \
7598                         (self._displayed_list == self._serialized_tasks_cache or \
7599                         self._displayed_list == \
7600                                 list(reversed(self._serialized_tasks_cache)))):
7601                         display_list = self._serialized_tasks_cache[:]
7602                         if "--tree" in self.myopts:
7603                                 display_list.reverse()
7604                         self.display(display_list)
7605
7606         def _show_unsatisfied_blockers(self, blockers):
7607                 self._show_merge_list()
7608                 msg = "Error: The above package list contains " + \
7609                         "packages which cannot be installed " + \
7610                         "at the same time on the same system."
7611                 prefix = colorize("BAD", " * ")
7612                 from textwrap import wrap
7613                 portage.writemsg("\n", noiselevel=-1)
7614                 for line in wrap(msg, 70):
7615                         portage.writemsg(prefix + line + "\n", noiselevel=-1)
7616
7617                 # Display the conflicting packages along with the packages
7618                 # that pulled them in. This is helpful for troubleshooting
7619                 # cases in which blockers don't solve automatically and
7620                 # the reasons are not apparent from the normal merge list
7621                 # display.
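                     # Rough shape of the output assembled below (illustrative):
                     # one "<pkg> pulled in by" line per conflict package, then up
                     # to max_parents indented "<atom> required by <parent>" lines,
                     # then an "(and N more)" line when parents were omitted.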
7622
7623                 conflict_pkgs = {}
7624                 for blocker in blockers:
7625                         for pkg in chain(self._blocked_pkgs.child_nodes(blocker), \
7626                                 self._blocker_parents.parent_nodes(blocker)):
7627                                 parent_atoms = self._parent_atoms.get(pkg)
7628                                 if not parent_atoms:
7629                                         atom = self._blocked_world_pkgs.get(pkg)
7630                                         if atom is not None:
7631                                                 parent_atoms = set([("@world", atom)])
7632                                 if parent_atoms:
7633                                         conflict_pkgs[pkg] = parent_atoms
7634
7635                 if conflict_pkgs:
7636                         # Reduce noise by pruning packages that are only
7637                         # pulled in by other conflict packages.
7638                         pruned_pkgs = set()
7639                         for pkg, parent_atoms in conflict_pkgs.iteritems():
7640                                 relevant_parent = False
7641                                 for parent, atom in parent_atoms:
7642                                         if parent not in conflict_pkgs:
7643                                                 relevant_parent = True
7644                                                 break
7645                                 if not relevant_parent:
7646                                         pruned_pkgs.add(pkg)
7647                         for pkg in pruned_pkgs:
7648                                 del conflict_pkgs[pkg]
7649
7650                 if conflict_pkgs:
7651                         msg = []
7652                         msg.append("\n")
7653                         indent = "  "
7654                         # Max number of parents shown, to avoid flooding the display.
7655                         max_parents = 3
7656                         for pkg, parent_atoms in conflict_pkgs.iteritems():
7657
7658                                 pruned_list = set()
7659
7660                                 # Prefer packages that are not directly involved in a conflict.
7661                                 for parent_atom in parent_atoms:
7662                                         if len(pruned_list) >= max_parents:
7663                                                 break
7664                                         parent, atom = parent_atom
7665                                         if parent not in conflict_pkgs:
7666                                                 pruned_list.add(parent_atom)
7667
7668                                 for parent_atom in parent_atoms:
7669                                         if len(pruned_list) >= max_parents:
7670                                                 break
7671                                         pruned_list.add(parent_atom)
7672
7673                                 omitted_parents = len(parent_atoms) - len(pruned_list)
7674                                 msg.append(indent + "%s pulled in by\n" % pkg)
7675
7676                                 for parent_atom in pruned_list:
7677                                         parent, atom = parent_atom
7678                                         msg.append(2*indent)
7679                                         if isinstance(parent,
7680                                                 (PackageArg, AtomArg)):
7681                                                 # For PackageArg and AtomArg types, it's
7682                                                 # redundant to display the atom attribute.
7683                                                 msg.append(str(parent))
7684                                         else:
7685                                                 # Display the specific atom from SetArg or
7686                                                 # Package types.
7687                                                 msg.append("%s required by %s" % (atom, parent))
7688                                         msg.append("\n")
7689
7690                                 if omitted_parents:
7691                                         msg.append(2*indent)
7692                                         msg.append("(and %d more)\n" % omitted_parents)
7693
7694                                 msg.append("\n")
7695
7696                         sys.stderr.write("".join(msg))
7697                         sys.stderr.flush()
7698
7699                 if "--quiet" not in self.myopts:
7700                         show_blocker_docs_link()
7701
7702         def display(self, mylist, favorites=[], verbosity=None):
7703
7704                 # This is used to prevent display_problems() from
7705                 # redundantly displaying this exact same merge list
7706                 # again via _show_merge_list().
7707                 self._displayed_list = mylist
7708
7709                 if verbosity is None:
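                             # Effective levels: 1 with --quiet, 3 with --verbose, else 2.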
7710                         verbosity = ("--quiet" in self.myopts and 1 or \
7711                                 "--verbose" in self.myopts and 3 or 2)
7712                 favorites_set = InternalPackageSet(favorites)
7713                 oneshot = "--oneshot" in self.myopts or \
7714                         "--onlydeps" in self.myopts
7715                 columns = "--columns" in self.myopts
7716                 changelogs=[]
7717                 p=[]
7718                 blockers = []
7719
7720                 counters = PackageCounters()
7721
7722                 if verbosity == 1 and "--verbose" not in self.myopts:
7723                         def create_use_string(*args):
7724                                 return ""
7725                 else:
7726                         def create_use_string(name, cur_iuse, iuse_forced, cur_use,
7727                                 old_iuse, old_use,
7728                                 is_new, reinst_flags,
7729                                 all_flags=(verbosity == 3 or "--quiet" in self.myopts),
7730                                 alphabetical=("--alphabetical" in self.myopts)):
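                                     # Illustrative result (hypothetical flags, colour codes
                                     # omitted): returns e.g. 'USE="ssl* (-berkdb%)" ' where "*"
                                     # marks a flag whose state changed, "%" marks a flag added
                                     # to or removed from IUSE, and parentheses mark forced or
                                     # removed flags.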
7731                                 enabled = []
7732                                 if alphabetical:
7733                                         disabled = enabled
7734                                         removed = enabled
7735                                 else:
7736                                         disabled = []
7737                                         removed = []
7738                                 cur_iuse = set(cur_iuse)
7739                                 enabled_flags = cur_iuse.intersection(cur_use)
7740                                 removed_iuse = set(old_iuse).difference(cur_iuse)
7741                                 any_iuse = cur_iuse.union(old_iuse)
7742                                 any_iuse = list(any_iuse)
7743                                 any_iuse.sort()
7744                                 for flag in any_iuse:
7745                                         flag_str = None
7746                                         isEnabled = False
7747                                         reinst_flag = reinst_flags and flag in reinst_flags
7748                                         if flag in enabled_flags:
7749                                                 isEnabled = True
7750                                                 if is_new or flag in old_use and \
7751                                                         (all_flags or reinst_flag):
7752                                                         flag_str = red(flag)
7753                                                 elif flag not in old_iuse:
7754                                                         flag_str = yellow(flag) + "%*"
7755                                                 elif flag not in old_use:
7756                                                         flag_str = green(flag) + "*"
7757                                         elif flag in removed_iuse:
7758                                                 if all_flags or reinst_flag:
7759                                                         flag_str = yellow("-" + flag) + "%"
7760                                                         if flag in old_use:
7761                                                                 flag_str += "*"
7762                                                         flag_str = "(" + flag_str + ")"
7763                                                         removed.append(flag_str)
7764                                                 continue
7765                                         else:
7766                                                 if is_new or flag in old_iuse and \
7767                                                         flag not in old_use and \
7768                                                         (all_flags or reinst_flag):
7769                                                         flag_str = blue("-" + flag)
7770                                                 elif flag not in old_iuse:
7771                                                         flag_str = yellow("-" + flag)
7772                                                         if flag not in iuse_forced:
7773                                                                 flag_str += "%"
7774                                                 elif flag in old_use:
7775                                                         flag_str = green("-" + flag) + "*"
7776                                         if flag_str:
7777                                                 if flag in iuse_forced:
7778                                                         flag_str = "(" + flag_str + ")"
7779                                                 if isEnabled:
7780                                                         enabled.append(flag_str)
7781                                                 else:
7782                                                         disabled.append(flag_str)
7783
7784                                 if alphabetical:
7785                                         ret = " ".join(enabled)
7786                                 else:
7787                                         ret = " ".join(enabled + disabled + removed)
7788                                 if ret:
7789                                         ret = '%s="%s" ' % (name, ret)
7790                                 return ret
7791
7792                 repo_display = RepoDisplay(self.roots)
7793
7794                 tree_nodes = []
7795                 display_list = []
7796                 mygraph = self.digraph.copy()
7797
7798                 # If there are any Uninstall instances, add the corresponding
7799                 # blockers to the digraph (useful for --tree display).
7800
7801                 executed_uninstalls = set(node for node in mylist \
7802                         if isinstance(node, Package) and node.operation == "unmerge")
7803
7804                 for uninstall in self._blocker_uninstalls.leaf_nodes():
7805                         uninstall_parents = \
7806                                 self._blocker_uninstalls.parent_nodes(uninstall)
7807                         if not uninstall_parents:
7808                                 continue
7809
7810                         # Remove the corresponding "nomerge" node and substitute
7811                         # the Uninstall node.
7812                         inst_pkg = self._pkg_cache[
7813                                 ("installed", uninstall.root, uninstall.cpv, "nomerge")]
7814                         try:
7815                                 mygraph.remove(inst_pkg)
7816                         except KeyError:
7817                                 pass
7818
7819                         try:
7820                                 inst_pkg_blockers = self._blocker_parents.child_nodes(inst_pkg)
7821                         except KeyError:
7822                                 inst_pkg_blockers = []
7823
7824                         # Break the Package -> Uninstall edges.
7825                         mygraph.remove(uninstall)
7826
7827                         # Resolution of a package's blockers
7828                         # depends on its own uninstallation.
7829                         for blocker in inst_pkg_blockers:
7830                                 mygraph.add(uninstall, blocker)
7831
7832                         # Expand Package -> Uninstall edges into
7833                         # Package -> Blocker -> Uninstall edges.
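                             # Net effect (illustrative): a former parent -> uninstall
                             # relationship is shown as parent -> blocker -> uninstall in
                             # --tree output, making it clear which blocker forced the
                             # unmerge.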
7834                         for blocker in uninstall_parents:
7835                                 mygraph.add(uninstall, blocker)
7836                                 for parent in self._blocker_parents.parent_nodes(blocker):
7837                                         if parent != inst_pkg:
7838                                                 mygraph.add(blocker, parent)
7839
7840                         # If the uninstall task did not need to be executed because
7841                         # of an upgrade, display Blocker -> Upgrade edges since the
7842                         # corresponding Blocker -> Uninstall edges will not be shown.
7843                         upgrade_node = \
7844                                 self._slot_pkg_map[uninstall.root].get(uninstall.slot_atom)
7845                         if upgrade_node is not None and \
7846                                 uninstall not in executed_uninstalls:
7847                                 for blocker in uninstall_parents:
7848                                         mygraph.add(upgrade_node, blocker)
7849
7850                 unsatisfied_blockers = []
7851                 i = 0
7852                 depth = 0
7853                 shown_edges = set()
7854                 for x in mylist:
7855                         if isinstance(x, Blocker) and not x.satisfied:
7856                                 unsatisfied_blockers.append(x)
7857                                 continue
7858                         graph_key = x
7859                         if "--tree" in self.myopts:
7860                                 depth = len(tree_nodes)
7861                                 while depth and graph_key not in \
7862                                         mygraph.child_nodes(tree_nodes[depth-1]):
7863                                                 depth -= 1
7864                                 if depth:
7865                                         tree_nodes = tree_nodes[:depth]
7866                                         tree_nodes.append(graph_key)
7867                                         display_list.append((x, depth, True))
7868                                         shown_edges.add((graph_key, tree_nodes[depth-1]))
7869                                 else:
7870                                         traversed_nodes = set() # prevent endless cycles
7871                                         traversed_nodes.add(graph_key)
7872                                         def add_parents(current_node, ordered):
7873                                                 parent_nodes = None
7874                                                 # Do not traverse to parents if this node is
7875                                                 # an argument or a direct member of a set that has
7876                                                 # been specified as an argument (system or world).
7877                                                 if current_node not in self._set_nodes:
7878                                                         parent_nodes = mygraph.parent_nodes(current_node)
7879                                                 if parent_nodes:
7880                                                         child_nodes = set(mygraph.child_nodes(current_node))
7881                                                         selected_parent = None
7882                                                         # First, try to avoid a direct cycle.
7883                                                         for node in parent_nodes:
7884                                                                 if not isinstance(node, (Blocker, Package)):
7885                                                                         continue
7886                                                                 if node not in traversed_nodes and \
7887                                                                         node not in child_nodes:
7888                                                                         edge = (current_node, node)
7889                                                                         if edge in shown_edges:
7890                                                                                 continue
7891                                                                         selected_parent = node
7892                                                                         break
7893                                                         if not selected_parent:
7894                                                                 # A direct cycle is unavoidable.
7895                                                                 for node in parent_nodes:
7896                                                                         if not isinstance(node, (Blocker, Package)):
7897                                                                                 continue
7898                                                                         if node not in traversed_nodes:
7899                                                                                 edge = (current_node, node)
7900                                                                                 if edge in shown_edges:
7901                                                                                         continue
7902                                                                                 selected_parent = node
7903                                                                                 break
7904                                                         if selected_parent:
7905                                                                 shown_edges.add((current_node, selected_parent))
7906                                                                 traversed_nodes.add(selected_parent)
7907                                                                 add_parents(selected_parent, False)
7908                                                 display_list.append((current_node,
7909                                                         len(tree_nodes), ordered))
7910                                                 tree_nodes.append(current_node)
7911                                         tree_nodes = []
7912                                         add_parents(graph_key, True)
7913                         else:
7914                                 display_list.append((x, depth, True))
7915                 mylist = display_list
7916                 for x in unsatisfied_blockers:
7917                         mylist.append((x, 0, True))
7918
7919                 last_merge_depth = 0
7920                 for i in xrange(len(mylist)-1,-1,-1):
7921                         graph_key, depth, ordered = mylist[i]
7922                         if not ordered and depth == 0 and i > 0 \
7923                                 and graph_key == mylist[i-1][0] and \
7924                                 mylist[i-1][1] == 0:
7925                                 # An ordered node got a consecutive duplicate when the tree was
7926                                 # being filled in.
7927                                 del mylist[i]
7928                                 continue
7929                         if ordered and graph_key[-1] != "nomerge":
7930                                 last_merge_depth = depth
7931                                 continue
7932                         if depth >= last_merge_depth or \
7933                                 i < len(mylist) - 1 and \
7934                                 depth >= mylist[i+1][1]:
7935                                         del mylist[i]
7936
7937                 from portage import flatten
7938                 from portage.dep import use_reduce, paren_reduce
7939                         # files to fetch list - avoids counting the same file twice
7940                 # in size display (verbose mode)
7941                 myfetchlist=[]
7942
7943                 # Use this set to detect when all the "repoadd" strings are "[0]"
7944                 # and disable the entire repo display in this case.
7945                 repoadd_set = set()
7946
7947                 for mylist_index in xrange(len(mylist)):
7948                         x, depth, ordered = mylist[mylist_index]
7949                         pkg_type = x[0]
7950                         myroot = x[1]
7951                         pkg_key = x[2]
7952                         portdb = self.trees[myroot]["porttree"].dbapi
7953                         bindb  = self.trees[myroot]["bintree"].dbapi
7954                         vardb = self.trees[myroot]["vartree"].dbapi
7955                         vartree = self.trees[myroot]["vartree"]
7956                         pkgsettings = self.pkgsettings[myroot]
7957
7958                         fetch=" "
7959                         indent = " " * depth
7960
7961                         if isinstance(x, Blocker):
7962                                 if x.satisfied:
7963                                         blocker_style = "PKG_BLOCKER_SATISFIED"
7964                                         addl = "%s  %s  " % (colorize(blocker_style, "b"), fetch)
7965                                 else:
7966                                         blocker_style = "PKG_BLOCKER"
7967                                         addl = "%s  %s  " % (colorize(blocker_style, "B"), fetch)
7968                                 if ordered:
7969                                         counters.blocks += 1
7970                                         if x.satisfied:
7971                                                 counters.blocks_satisfied += 1
7972                                 resolved = portage.key_expand(
7973                                         str(x.atom).lstrip("!"), mydb=vardb, settings=pkgsettings)
7974                                 if "--columns" in self.myopts and "--quiet" in self.myopts:
7975                                         addl += " " + colorize(blocker_style, resolved)
7976                                 else:
7977                                         addl = "[%s %s] %s%s" % \
7978                                                 (colorize(blocker_style, "blocks"),
7979                                                 addl, indent, colorize(blocker_style, resolved))
7980                                 block_parents = self._blocker_parents.parent_nodes(x)
7981                                 block_parents = set([pnode[2] for pnode in block_parents])
7982                                 block_parents = ", ".join(block_parents)
7983                                 if resolved!=x[2]:
7984                                         addl += colorize(blocker_style,
7985                                                 " (\"%s\" is blocking %s)") % \
7986                                                 (str(x.atom).lstrip("!"), block_parents)
7987                                 else:
7988                                         addl += colorize(blocker_style,
7989                                                 " (is blocking %s)") % block_parents
7990                                 if isinstance(x, Blocker) and x.satisfied:
7991                                         if columns:
7992                                                 continue
7993                                         p.append(addl)
7994                                 else:
7995                                         blockers.append(addl)
7996                         else:
7997                                 pkg_status = x[3]
7998                                 pkg_merge = ordered and pkg_status == "merge"
7999                                 if not pkg_merge and pkg_status == "merge":
8000                                         pkg_status = "nomerge"
8001                                 built = pkg_type != "ebuild"
8002                                 installed = pkg_type == "installed"
8003                                 pkg = x
8004                                 metadata = pkg.metadata
8005                                 ebuild_path = None
8006                                 repo_name = metadata["repository"]
8007                                 if pkg_type == "ebuild":
8008                                         ebuild_path = portdb.findname(pkg_key)
8009                                         if not ebuild_path: # shouldn't happen
8010                                                 raise portage.exception.PackageNotFound(pkg_key)
8011                                         repo_path_real = os.path.dirname(os.path.dirname(
8012                                                 os.path.dirname(ebuild_path)))
8013                                 else:
8014                                         repo_path_real = portdb.getRepositoryPath(repo_name)
8015                                 pkg_use = list(pkg.use.enabled)
8016                                 try:
8017                                         restrict = flatten(use_reduce(paren_reduce(
8018                                                 pkg.metadata["RESTRICT"]), uselist=pkg_use))
8019                                 except portage.exception.InvalidDependString, e:
8020                                         if not pkg.installed:
8021                                                 show_invalid_depstring_notice(x,
8022                                                         pkg.metadata["RESTRICT"], str(e))
8023                                                 del e
8024                                                 return 1
8025                                         restrict = []
8026                                 if "ebuild" == pkg_type and x[3] != "nomerge" and \
8027                                         "fetch" in restrict:
8028                                         fetch = red("F")
8029                                         if ordered:
8030                                                 counters.restrict_fetch += 1
8031                                         if portdb.fetch_check(pkg_key, pkg_use):
8032                                                 fetch = green("f")
8033                                                 if ordered:
8034                                                         counters.restrict_fetch_satisfied += 1
8035
8036                                 # We need to test for "--emptytree" here rather than the "empty" param, because the "empty"
8037                                 # param is used for -u, where you still *do* want to see when something is being upgraded.
8038                                 myoldbest = []
8039                                 myinslotlist = None
8040                                 installed_versions = vardb.match(portage.cpv_getkey(pkg_key))
8041                                 if vardb.cpv_exists(pkg_key):
8042                                         addl="  "+yellow("R")+fetch+"  "
8043                                         if ordered:
8044                                                 if pkg_merge:
8045                                                         counters.reinst += 1
8046                                                 elif pkg_status == "uninstall":
8047                                                         counters.uninst += 1
8048                                 # filter out old-style virtual matches
8049                                 elif installed_versions and \
8050                                         portage.cpv_getkey(installed_versions[0]) == \
8051                                         portage.cpv_getkey(pkg_key):
8052                                         myinslotlist = vardb.match(pkg.slot_atom)
8053                                         # If this is the first install of a new-style virtual, we
8054                                         # need to filter out old-style virtual matches.
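                                             # Illustrative case (hypothetical): installing a new-style
                                             # virtual/libc-0 while an old-style PROVIDE of virtual/libc
                                             # from sys-libs/glibc is still installed yields a slot match
                                             # whose category/package key differs, so it is discarded.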
8055                                         if myinslotlist and \
8056                                                 portage.cpv_getkey(myinslotlist[0]) != \
8057                                                 portage.cpv_getkey(pkg_key):
8058                                                 myinslotlist = None
8059                                         if myinslotlist:
8060                                                 myoldbest = myinslotlist[:]
8061                                                 addl = "   " + fetch
8062                                                 if not portage.dep.cpvequal(pkg_key,
8063                                                         portage.best([pkg_key] + myoldbest)):
8064                                                         # Downgrade in slot
8065                                                         addl += turquoise("U")+blue("D")
8066                                                         if ordered:
8067                                                                 counters.downgrades += 1
8068                                                 else:
8069                                                         # Update in slot
8070                                                         addl += turquoise("U") + " "
8071                                                         if ordered:
8072                                                                 counters.upgrades += 1
8073                                         else:
8074                                                 # New slot, mark it new.
8075                                                 addl = " " + green("NS") + fetch + "  "
8076                                                 myoldbest = vardb.match(portage.cpv_getkey(pkg_key))
8077                                                 if ordered:
8078                                                         counters.newslot += 1
8079
8080                                         if "--changelog" in self.myopts:
8081                                                 inst_matches = vardb.match(pkg.slot_atom)
8082                                                 if inst_matches:
8083                                                         changelogs.extend(self.calc_changelog(
8084                                                                 portdb.findname(pkg_key),
8085                                                                 inst_matches[0], pkg_key))
8086                                 else:
8087                                         addl = " " + green("N") + " " + fetch + "  "
8088                                         if ordered:
8089                                                 counters.new += 1
8090
8091                                 verboseadd = ""
8092                                 repoadd = None
8093
8094                                 if True:
8095                                         # USE flag display
8096                                         forced_flags = set()
8097                                         pkgsettings.setcpv(pkg) # for package.use.{mask,force}
8098                                         forced_flags.update(pkgsettings.useforce)
8099                                         forced_flags.update(pkgsettings.usemask)
8100
8101                                         cur_use = [flag for flag in pkg.use.enabled \
8102                                                 if flag in pkg.iuse.all]
8103                                         cur_iuse = sorted(pkg.iuse.all)
8104
8105                                         if myoldbest and myinslotlist:
8106                                                 previous_cpv = myoldbest[0]
8107                                         else:
8108                                                 previous_cpv = pkg.cpv
8109                                         if vardb.cpv_exists(previous_cpv):
8110                                                 old_iuse, old_use = vardb.aux_get(
8111                                                                 previous_cpv, ["IUSE", "USE"])
8112                                                 old_iuse = list(set(
8113                                                         filter_iuse_defaults(old_iuse.split())))
8114                                                 old_iuse.sort()
8115                                                 old_use = old_use.split()
8116                                                 is_new = False
8117                                         else:
8118                                                 old_iuse = []
8119                                                 old_use = []
8120                                                 is_new = True
8121
8122                                         old_use = [flag for flag in old_use if flag in old_iuse]
8123
8124                                         use_expand = pkgsettings["USE_EXPAND"].lower().split()
8125                                         use_expand.sort()
8126                                         use_expand.reverse()
8127                                         use_expand_hidden = \
8128                                                 pkgsettings["USE_EXPAND_HIDDEN"].lower().split()
8129
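                                             # map_to_use_expand() groups a flat list of USE flags into
                                             # USE_EXPAND buckets keyed by the lowercase variable name,
                                             # with all remaining flags under "USE".  Illustrative example
                                             # (this assumes "video_cards" appears in USE_EXPAND):
                                             #   ["video_cards_radeon", "alsa"]
                                             #   -> {"video_cards": ["radeon"], "USE": ["alsa"]}
                                             # Buckets named in USE_EXPAND_HIDDEN are dropped unless
                                             # removeHidden=False; with forcedFlags=True the subset of
                                             # forced flags per bucket is returned as well.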
8130                                         def map_to_use_expand(myvals, forcedFlags=False,
8131                                                 removeHidden=True):
8132                                                 ret = {}
8133                                                 forced = {}
8134                                                 for exp in use_expand:
8135                                                         ret[exp] = []
8136                                                         forced[exp] = set()
8137                                                         for val in myvals[:]:
8138                                                                 if val.startswith(exp.lower()+"_"):
8139                                                                         if val in forced_flags:
8140                                                                                 forced[exp].add(val[len(exp)+1:])
8141                                                                         ret[exp].append(val[len(exp)+1:])
8142                                                                         myvals.remove(val)
8143                                                 ret["USE"] = myvals
8144                                                 forced["USE"] = [val for val in myvals \
8145                                                         if val in forced_flags]
8146                                                 if removeHidden:
8147                                                         for exp in use_expand_hidden:
8148                                                                 ret.pop(exp, None)
8149                                                 if forcedFlags:
8150                                                         return ret, forced
8151                                                 return ret
8152
8153                                         # Prevent USE_EXPAND_HIDDEN flags from being hidden if they
8154                                         # are the only thing that triggered reinstallation.
8155                                         reinst_flags_map = {}
8156                                         reinstall_for_flags = self._reinstall_nodes.get(pkg)
8157                                         reinst_expand_map = None
8158                                         if reinstall_for_flags:
8159                                                 reinst_flags_map = map_to_use_expand(
8160                                                         list(reinstall_for_flags), removeHidden=False)
8161                                                 for k in list(reinst_flags_map):
8162                                                         if not reinst_flags_map[k]:
8163                                                                 del reinst_flags_map[k]
8164                                                 if not reinst_flags_map.get("USE"):
8165                                                         reinst_expand_map = reinst_flags_map.copy()
8166                                                         reinst_expand_map.pop("USE", None)
8167                                         if reinst_expand_map and \
8168                                                 not set(reinst_expand_map).difference(
8169                                                 use_expand_hidden):
8170                                                 use_expand_hidden = \
8171                                                         set(use_expand_hidden).difference(
8172                                                         reinst_expand_map)
8173
8174                                         cur_iuse_map, iuse_forced = \
8175                                                 map_to_use_expand(cur_iuse, forcedFlags=True)
8176                                         cur_use_map = map_to_use_expand(cur_use)
8177                                         old_iuse_map = map_to_use_expand(old_iuse)
8178                                         old_use_map = map_to_use_expand(old_use)
8179
8180                                         use_expand.sort()
8181                                         use_expand.insert(0, "USE")
8182
8183                                         for key in use_expand:
8184                                                 if key in use_expand_hidden:
8185                                                         continue
8186                                                 verboseadd += create_use_string(key.upper(),
8187                                                         cur_iuse_map[key], iuse_forced[key],
8188                                                         cur_use_map[key], old_iuse_map[key],
8189                                                         old_use_map[key], is_new,
8190                                                         reinst_flags_map.get(key))
8191
8192                                 if verbosity == 3:
8193                                         # size verbose
8194                                         mysize=0
8195                                         if pkg_type == "ebuild" and pkg_merge:
8196                                                 try:
8197                                                         myfilesdict = portdb.getfetchsizes(pkg_key,
8198                                                                 useflags=pkg_use, debug=self.edebug)
8199                                                 except portage.exception.InvalidDependString, e:
8200                                                         src_uri = portdb.aux_get(pkg_key, ["SRC_URI"])[0]
8201                                                         show_invalid_depstring_notice(x, src_uri, str(e))
8202                                                         del e
8203                                                         return 1
8204                                                 if myfilesdict is None:
8205                                                         myfilesdict="[empty/missing/bad digest]"
8206                                                 else:
8207                                                         for myfetchfile in myfilesdict:
8208                                                                 if myfetchfile not in myfetchlist:
8209                                                                         mysize+=myfilesdict[myfetchfile]
8210                                                                         myfetchlist.append(myfetchfile)
8211                                                         if ordered:
8212                                                                 counters.totalsize += mysize
8213                                                 verboseadd += format_size(mysize)
8214
8215                                         # overlay verbose
8216                                         # look up a previous version in the same slot (for repository display)
8217                                         has_previous = False
8218                                         repo_name_prev = None
8219                                         slot_atom = "%s:%s" % (portage.dep_getkey(pkg_key),
8220                                                 metadata["SLOT"])
8221                                         slot_matches = vardb.match(slot_atom)
8222                                         if slot_matches:
8223                                                 has_previous = True
8224                                                 repo_name_prev = vardb.aux_get(slot_matches[0],
8225                                                         ["repository"])[0]
8226
8227                                         # now use the data to generate output
8228                                         if pkg.installed or not has_previous:
8229                                                 repoadd = repo_display.repoStr(repo_path_real)
8230                                         else:
8231                                                 repo_path_prev = None
8232                                                 if repo_name_prev:
8233                                                         repo_path_prev = portdb.getRepositoryPath(
8234                                                                 repo_name_prev)
8235                                                 if repo_path_prev == repo_path_real:
8236                                                         repoadd = repo_display.repoStr(repo_path_real)
8237                                                 else:
8238                                                         repoadd = "%s=>%s" % (
8239                                                                 repo_display.repoStr(repo_path_prev),
8240                                                                 repo_display.repoStr(repo_path_real))
8241                                         if repoadd:
8242                                                 repoadd_set.add(repoadd)
8243
8244                                 xs = [portage.cpv_getkey(pkg_key)] + \
8245                                         list(portage.catpkgsplit(pkg_key)[2:])
8246                                 if xs[2] == "r0":
8247                                         xs[2] = ""
8248                                 else:
8249                                         xs[2] = "-" + xs[2]
8250
8251                                 mywidth = 130
8252                                 if "COLUMNWIDTH" in self.settings:
8253                                         try:
8254                                                 mywidth = int(self.settings["COLUMNWIDTH"])
8255                                         except ValueError, e:
8256                                                 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
8257                                                 portage.writemsg(
8258                                                         "!!! Unable to parse COLUMNWIDTH='%s'\n" % \
8259                                                         self.settings["COLUMNWIDTH"], noiselevel=-1)
8260                                                 del e
8261                                 oldlp = mywidth - 30
8262                                 newlp = oldlp - 30
8263
8264                                 # Convert myoldbest from a list to a string.
8265                                 if not myoldbest:
8266                                         myoldbest = ""
8267                                 else:
8268                                         for pos, key in enumerate(myoldbest):
8269                                                 key = portage.catpkgsplit(key)[2] + \
8270                                                         "-" + portage.catpkgsplit(key)[3]
8271                                                 if key[-3:] == "-r0":
8272                                                         key = key[:-3]
8273                                                 myoldbest[pos] = key
8274                                         myoldbest = blue("["+", ".join(myoldbest)+"]")
8275
8276                                 pkg_cp = xs[0]
8277                                 root_config = self.roots[myroot]
8278                                 system_set = root_config.sets["system"]
8279                                 world_set  = root_config.sets["world"]
8280
8281                                 pkg_system = False
8282                                 pkg_world = False
8283                                 try:
8284                                         pkg_system = system_set.findAtomForPackage(pkg)
8285                                         pkg_world  = world_set.findAtomForPackage(pkg)
8286                                         if not (oneshot or pkg_world) and \
8287                                                 myroot == self.target_root and \
8288                                                 favorites_set.findAtomForPackage(pkg):
8289                                                 # Maybe it will be added to world now.
8290                                                 if create_world_atom(pkg, favorites_set, root_config):
8291                                                         pkg_world = True
8292                                 except portage.exception.InvalidDependString:
8293                                         # This is reported elsewhere if relevant.
8294                                         pass
8295
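                                     # pkgprint() colorizes a package string according to its status:
                                     # merge vs. uninstall vs. nomerge, further distinguished by whether
                                     # the package is in the system or world set (pkg_system/pkg_world
                                     # computed above).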
8296                                 def pkgprint(pkg_str):
8297                                         if pkg_merge:
8298                                                 if pkg_system:
8299                                                         return colorize("PKG_MERGE_SYSTEM", pkg_str)
8300                                                 elif pkg_world:
8301                                                         return colorize("PKG_MERGE_WORLD", pkg_str)
8302                                                 else:
8303                                                         return colorize("PKG_MERGE", pkg_str)
8304                                         elif pkg_status == "uninstall":
8305                                                 return colorize("PKG_UNINSTALL", pkg_str)
8306                                         else:
8307                                                 if pkg_system:
8308                                                         return colorize("PKG_NOMERGE_SYSTEM", pkg_str)
8309                                                 elif pkg_world:
8310                                                         return colorize("PKG_NOMERGE_WORLD", pkg_str)
8311                                                 else:
8312                                                         return colorize("PKG_NOMERGE", pkg_str)
8313
8314                                 try:
8315                                         properties = flatten(use_reduce(paren_reduce(
8316                                                 pkg.metadata["PROPERTIES"]), uselist=pkg.use.enabled))
8317                                 except portage.exception.InvalidDependString, e:
8318                                         if not pkg.installed:
8319                                                 show_invalid_depstring_notice(pkg,
8320                                                         pkg.metadata["PROPERTIES"], str(e))
8321                                                 del e
8322                                                 return 1
8323                                         properties = []
8324                                 interactive = "interactive" in properties
8325                                 if interactive and pkg.operation == "merge":
8326                                         addl = colorize("WARN", "I") + addl[1:]
8327                                         if ordered:
8328                                                 counters.interactive += 1
8329
8330                                 if x[1]!="/":
8331                                         if myoldbest:
8332                                                 myoldbest +=" "
8333                                         if "--columns" in self.myopts:
8334                                                 if "--quiet" in self.myopts:
8335                                                         myprint=addl+" "+indent+pkgprint(pkg_cp)
8336                                                         myprint=myprint+darkblue(" "+xs[1]+xs[2])+" "
8337                                                         myprint=myprint+myoldbest
8338                                                         myprint=myprint+darkgreen("to "+x[1])
8339                                                         verboseadd = None
8340                                                 else:
8341                                                         if not pkg_merge:
8342                                                                 myprint = "[%s] %s%s" % \
8343                                                                         (pkgprint(pkg_status.ljust(13)),
8344                                                                         indent, pkgprint(pkg.cp))
8345                                                         else:
8346                                                                 myprint = "[%s %s] %s%s" % \
8347                                                                         (pkgprint(pkg.type_name), addl,
8348                                                                         indent, pkgprint(pkg.cp))
8349                                                         if (newlp-nc_len(myprint)) > 0:
8350                                                                 myprint=myprint+(" "*(newlp-nc_len(myprint)))
8351                                                         myprint=myprint+"["+darkblue(xs[1]+xs[2])+"] "
8352                                                         if (oldlp-nc_len(myprint)) > 0:
8353                                                                 myprint=myprint+" "*(oldlp-nc_len(myprint))
8354                                                         myprint=myprint+myoldbest
8355                                                         myprint += darkgreen("to " + pkg.root)
8356                                         else:
8357                                                 if not pkg_merge:
8358                                                         myprint = "[%s] " % pkgprint(pkg_status.ljust(13))
8359                                                 else:
8360                                                         myprint = "[%s %s] " % (pkgprint(pkg_type), addl)
8361                                                 myprint += indent + pkgprint(pkg_key) + " " + \
8362                                                         myoldbest + darkgreen("to " + myroot)
8363                                 else:
8364                                         if "--columns" in self.myopts:
8365                                                 if "--quiet" in self.myopts:
8366                                                         myprint=addl+" "+indent+pkgprint(pkg_cp)
8367                                                         myprint=myprint+" "+green(xs[1]+xs[2])+" "
8368                                                         myprint=myprint+myoldbest
8369                                                         verboseadd = None
8370                                                 else:
8371                                                         if not pkg_merge:
8372                                                                 myprint = "[%s] %s%s" % \
8373                                                                         (pkgprint(pkg_status.ljust(13)),
8374                                                                         indent, pkgprint(pkg.cp))
8375                                                         else:
8376                                                                 myprint = "[%s %s] %s%s" % \
8377                                                                         (pkgprint(pkg.type_name), addl,
8378                                                                         indent, pkgprint(pkg.cp))
8379                                                         if (newlp-nc_len(myprint)) > 0:
8380                                                                 myprint=myprint+(" "*(newlp-nc_len(myprint)))
8381                                                         myprint=myprint+green(" ["+xs[1]+xs[2]+"] ")
8382                                                         if (oldlp-nc_len(myprint)) > 0:
8383                                                                 myprint=myprint+(" "*(oldlp-nc_len(myprint)))
8384                                                         myprint += myoldbest
8385                                         else:
8386                                                 if not pkg_merge:
8387                                                         myprint = "[%s] %s%s %s" % \
8388                                                                 (pkgprint(pkg_status.ljust(13)),
8389                                                                 indent, pkgprint(pkg.cpv),
8390                                                                 myoldbest)
8391                                                 else:
8392                                                         myprint = "[%s %s] %s%s %s" % \
8393                                                                 (pkgprint(pkg_type), addl, indent,
8394                                                                 pkgprint(pkg.cpv), myoldbest)
8395
8396                                 if columns and pkg.operation == "uninstall":
8397                                         continue
8398                                 p.append((myprint, verboseadd, repoadd))
8399
8400                                 if "--tree" not in self.myopts and \
8401                                         "--quiet" not in self.myopts and \
8402                                         not self._opts_no_restart.intersection(self.myopts) and \
8403                                         pkg.root == self._running_root.root and \
8404                                         portage.match_from_list(
8405                                         portage.const.PORTAGE_PACKAGE_ATOM, [pkg]) and \
8406                                         not vardb.cpv_exists(pkg.cpv):
8408                                                 if mylist_index < len(mylist) - 1:
8409                                                         p.append(colorize("WARN", "*** Portage will stop merging at this point and reload itself,"))
8410                                                         p.append(colorize("WARN", "    then resume the merge."))
8411
8412                 out = sys.stdout
8413                 show_repos = repoadd_set and repoadd_set != set(["0"])
8414
8415                 for x in p:
8416                         if isinstance(x, basestring):
8417                                 out.write("%s\n" % (x,))
8418                                 continue
8419
8420                         myprint, verboseadd, repoadd = x
8421
8422                         if verboseadd:
8423                                 myprint += " " + verboseadd
8424
8425                         if show_repos and repoadd:
8426                                 myprint += " " + teal("[%s]" % repoadd)
8427
8428                         out.write("%s\n" % (myprint,))
8429
8430                 for x in blockers:
8431                         print x
8432
8433                 if verbosity == 3:
8434                         print
8435                         print counters
8436                         if show_repos:
8437                                 sys.stdout.write(str(repo_display))
8438
8439                 if "--changelog" in self.myopts:
8440                         print
8441                         for revision,text in changelogs:
8442                                 print bold('*'+revision)
8443                                 sys.stdout.write(text)
8444
8445                 sys.stdout.flush()
8446                 return os.EX_OK
8447
8448         def display_problems(self):
8449                 """
8450                 Display problems with the dependency graph such as slot collisions.
8451                 This is called internally by display() to show the problems _after_
8452                 the merge list where it is most likely to be seen, but if display()
8453                 is not going to be called then this method should be called explicitly
8454                 to ensure that the user is notified of problems with the graph.
8455
8456                 All output goes to stderr, except for unsatisfied dependencies which
8457                 go to stdout for parsing by programs such as autounmask.
8458                 """
8459
8460                 # Note that show_masked_packages() sends its output to
8461                 # stdout, and some programs such as autounmask parse the
8462                 # output in cases when emerge bails out. However, when
8463                 # show_masked_packages() is called for installed packages
8464                 # here, the message is a warning that is more appropriate
8465                 # to send to stderr, so temporarily redirect stdout to
8466                 # stderr. TODO: Fix output code so there's a cleaner way
8467                 # to redirect everything to stderr.
8468                 sys.stdout.flush()
8469                 sys.stderr.flush()
8470                 stdout = sys.stdout
8471                 try:
8472                         sys.stdout = sys.stderr
8473                         self._display_problems()
8474                 finally:
8475                         sys.stdout = stdout
8476                         sys.stdout.flush()
8477                         sys.stderr.flush()
8478
8479                 # This goes to stdout for parsing by programs like autounmask.
8480                 for pargs, kwargs in self._unsatisfied_deps_for_display:
8481                         self._show_unsatisfied_dep(*pargs, **kwargs)
8482
8483         def _display_problems(self):
8484                 if self._circular_deps_for_display is not None:
8485                         self._show_circular_deps(
8486                                 self._circular_deps_for_display)
8487
8488                 # The user is only notified of a slot conflict if
8489                 # there are no unresolvable blocker conflicts.
8490                 if self._unsatisfied_blockers_for_display is not None:
8491                         self._show_unsatisfied_blockers(
8492                                 self._unsatisfied_blockers_for_display)
8493                 else:
8494                         self._show_slot_collision_notice()
8495
8496                 # TODO: Add generic support for "set problem" handlers so that
8497                 # the below warnings aren't special cases for world only.
8498
8499                 if self._missing_args:
8500                         world_problems = False
8501                         if "world" in self._sets:
8502                                 # Filter out indirect members of world (from nested sets)
8503                                 # since only direct members of world are desired here.
8504                                 world_set = self.roots[self.target_root].sets["world"]
8505                                 for arg, atom in self._missing_args:
8506                                         if arg.name == "world" and atom in world_set:
8507                                                 world_problems = True
8508                                                 break
8509
8510                         if world_problems:
8511                                 sys.stderr.write("\n!!! Problems have been " + \
8512                                         "detected with your world file\n")
8513                                 sys.stderr.write("!!! Please run " + \
8514                                         green("emaint --check world")+"\n\n")
8515
8516                 if self._missing_args:
8517                         sys.stderr.write("\n" + colorize("BAD", "!!!") + \
8518                                 " Ebuilds for the following packages are either all\n")
8519                         sys.stderr.write(colorize("BAD", "!!!") + \
8520                                 " masked or don't exist:\n")
8521                         sys.stderr.write(" ".join(str(atom) for arg, atom in \
8522                                 self._missing_args) + "\n")
8523
8524                 if self._pprovided_args:
8525                         arg_refs = {}
8526                         for arg, atom in self._pprovided_args:
8527                                 if isinstance(arg, SetArg):
8528                                         parent = arg.name
8529                                         arg_atom = (atom, atom)
8530                                 else:
8531                                         parent = "args"
8532                                         arg_atom = (arg.arg, atom)
8533                                 refs = arg_refs.setdefault(arg_atom, [])
8534                                 if parent not in refs:
8535                                         refs.append(parent)
8536                         msg = []
8537                         msg.append(bad("\nWARNING: "))
8538                         if len(self._pprovided_args) > 1:
8539                                 msg.append("Requested packages will not be " + \
8540                                         "merged because they are listed in\n")
8541                         else:
8542                                 msg.append("A requested package will not be " + \
8543                                         "merged because it is listed in\n")
8544                         msg.append("package.provided:\n\n")
8545                         problems_sets = set()
8546                         for (arg, atom), refs in arg_refs.iteritems():
8547                                 ref_string = ""
8548                                 if refs:
8549                                         problems_sets.update(refs)
8550                                         refs.sort()
8551                                         ref_string = ", ".join(["'%s'" % name for name in refs])
8552                                         ref_string = " pulled in by " + ref_string
8553                                 msg.append("  %s%s\n" % (colorize("INFORM", str(arg)), ref_string))
8554                         msg.append("\n")
8555                         if "world" in problems_sets:
8556                                 msg.append("This problem can be solved in one of the following ways:\n\n")
8557                                 msg.append("  A) Use emaint to clean offending packages from world (if not installed).\n")
8558                                 msg.append("  B) Uninstall offending packages (cleans them from world).\n")
8559                                 msg.append("  C) Remove offending entries from package.provided.\n\n")
8560                                 msg.append("The best course of action depends on the reason that an offending\n")
8561                                 msg.append("package.provided entry exists.\n\n")
8562                         sys.stderr.write("".join(msg))
8563
8564                 masked_packages = []
8565                 for pkg in self._masked_installed:
8566                         root_config = pkg.root_config
8567                         pkgsettings = self.pkgsettings[pkg.root]
8568                         mreasons = get_masking_status(pkg, pkgsettings, root_config)
8569                         masked_packages.append((root_config, pkgsettings,
8570                                 pkg.cpv, pkg.metadata, mreasons))
8571                 if masked_packages:
8572                         sys.stderr.write("\n" + colorize("BAD", "!!!") + \
8573                                 " The following installed packages are masked:\n")
8574                         show_masked_packages(masked_packages)
8575                         show_mask_docs()
8576                         print
8577
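         # calc_changelog() returns the ChangeLog entries between the installed
         # version (current) and the version about to be merged (next), as a list
         # of (release, text) tuples; an empty list means there is nothing to show.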
8578         def calc_changelog(self,ebuildpath,current,next):
8579                 if ebuildpath is None or not os.path.exists(ebuildpath):
8580                         return []
8581                 current = '-'.join(portage.catpkgsplit(current)[1:])
8582                 if current.endswith('-r0'):
8583                         current = current[:-3]
8584                 next = '-'.join(portage.catpkgsplit(next)[1:])
8585                 if next.endswith('-r0'):
8586                         next = next[:-3]
8587                 changelogpath = os.path.join(os.path.split(ebuildpath)[0],'ChangeLog')
8588                 try:
8589                         changelog = open(changelogpath).read()
8590                 except SystemExit, e:
8591                         raise # Needed else can't exit
8592                 except:
8593                         return []
8594                 divisions = self.find_changelog_tags(changelog)
8595                 #print 'XX from',current,'to',next
8596                 #for div,text in divisions: print 'XX',div
8597                 # skip entries for all revisions above the one we are about to emerge
8598                 for i in range(len(divisions)):
8599                         if divisions[i][0]==next:
8600                                 divisions = divisions[i:]
8601                                 break
8602                 # find out how many entries we are going to display
8603                 for i in range(len(divisions)):
8604                         if divisions[i][0]==current:
8605                                 divisions = divisions[:i]
8606                                 break
8607                 else:
8608                         # couldn't find the current revision in the list; display nothing
8609                         return []
8610                 return divisions
8611
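         # find_changelog_tags() splits a ChangeLog into (release, text) tuples,
         # treating lines that start with "*" as release headers and stripping a
         # trailing ".ebuild" or "-r0" suffix from the release name.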
8612         def find_changelog_tags(self,changelog):
8613                 divs = []
8614                 release = None
8615                 while 1:
8616                         match = re.search(r'^\*\ ?([-a-zA-Z0-9_.+]*)(?:\ .*)?\n',changelog,re.M)
8617                         if match is None:
8618                                 if release is not None:
8619                                         divs.append((release,changelog))
8620                                 return divs
8621                         if release is not None:
8622                                 divs.append((release,changelog[:match.start()]))
8623                         changelog = changelog[match.end():]
8624                         release = match.group(1)
8625                         if release.endswith('.ebuild'):
8626                                 release = release[:-7]
8627                         if release.endswith('-r0'):
8628                                 release = release[:-3]
8629
8630         def saveNomergeFavorites(self):
8631                 """Find atoms in favorites that are not in the mergelist and add them
8632                 to the world file if necessary."""
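                 # With any of these options the world file must not be modified,
                 # so there is nothing to record.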
8633                 for x in ("--buildpkgonly", "--fetchonly", "--fetch-all-uri",
8634                         "--oneshot", "--onlydeps", "--pretend"):
8635                         if x in self.myopts:
8636                                 return
8637                 root_config = self.roots[self.target_root]
8638                 world_set = root_config.sets["world"]
8639
8640                 world_locked = False
8641                 if hasattr(world_set, "lock"):
8642                         world_set.lock()
8643                         world_locked = True
8644
8645                 if hasattr(world_set, "load"):
8646                         world_set.load() # maybe it's changed on disk
8647
8648                 args_set = self._sets["args"]
8649                 portdb = self.trees[self.target_root]["porttree"].dbapi
8650                 added_favorites = set()
8651                 for x in self._set_nodes:
8652                         pkg_type, root, pkg_key, pkg_status = x
8653                         if pkg_status != "nomerge":
8654                                 continue
8655
8656                         try:
8657                                 myfavkey = create_world_atom(x, args_set, root_config)
8658                                 if myfavkey:
8659                                         if myfavkey in added_favorites:
8660                                                 continue
8661                                         added_favorites.add(myfavkey)
8662                         except portage.exception.InvalidDependString, e:
8663                                 writemsg("\n\n!!! '%s' has invalid PROVIDE: %s\n" % \
8664                                         (pkg_key, str(e)), noiselevel=-1)
8665                                 writemsg("!!! see '%s'\n\n" % os.path.join(
8666                                         root, portage.VDB_PATH, pkg_key, "PROVIDE"), noiselevel=-1)
8667                                 del e
8668                 all_added = []
8669                 for k in self._sets:
8670                         if k in ("args", "world") or not root_config.sets[k].world_candidate:
8671                                 continue
8672                         s = SETPREFIX + k
8673                         if s in world_set:
8674                                 continue
8675                         all_added.append(SETPREFIX + k)
8676                 all_added.extend(added_favorites)
8677                 all_added.sort()
8678                 for a in all_added:
8679                         print ">>> Recording %s in \"world\" favorites file..." % \
8680                                 colorize("INFORM", str(a))
8681                 if all_added:
8682                         world_set.update(all_added)
8683
8684                 if world_locked:
8685                         world_set.unlock()
8686
8687         def loadResumeCommand(self, resume_data, skip_masked=False):
8688                 """
8689                 Add a resume command to the graph and validate it in the process.  This
8690                 will raise a PackageNotFound exception if a package is not available.
8691                 """
8692
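                 # A minimal sketch of the expected resume_data shape (the package
                 # names below are purely illustrative):
                 #   {"mergelist": [["ebuild", "/", "app-editors/vim-7.2", "merge"], ...],
                 #    "favorites": ["world", "app-editors/vim"]}
                 # Each mergelist entry is a 4-item list: pkg_type, root, cpv, action.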
8693                 if not isinstance(resume_data, dict):
8694                         return False
8695
8696                 mergelist = resume_data.get("mergelist")
8697                 if not isinstance(mergelist, list):
8698                         mergelist = []
8699
8700                 fakedb = self.mydbapi
8701                 trees = self.trees
8702                 serialized_tasks = []
8703                 masked_tasks = []
8704                 for x in mergelist:
8705                         if not (isinstance(x, list) and len(x) == 4):
8706                                 continue
8707                         pkg_type, myroot, pkg_key, action = x
8708                         if pkg_type not in self.pkg_tree_map:
8709                                 continue
8710                         if action != "merge":
8711                                 continue
8712                         tree_type = self.pkg_tree_map[pkg_type]
8713                         mydb = trees[myroot][tree_type].dbapi
8714                         db_keys = list(self._trees_orig[myroot][
8715                                 tree_type].dbapi._aux_cache_keys)
8716                         try:
8717                                 metadata = izip(db_keys, mydb.aux_get(pkg_key, db_keys))
8718                         except KeyError:
8719                                 # It does not exist or it is corrupt.
8720                                 if action == "uninstall":
8721                                         continue
8722                                 raise portage.exception.PackageNotFound(pkg_key)
8723                         installed = action == "uninstall"
8724                         built = pkg_type != "ebuild"
8725                         root_config = self.roots[myroot]
8726                         pkg = Package(built=built, cpv=pkg_key,
8727                                 installed=installed, metadata=metadata,
8728                                 operation=action, root_config=root_config,
8729                                 type_name=pkg_type)
8730                         if pkg_type == "ebuild":
8731                                 pkgsettings = self.pkgsettings[myroot]
8732                                 pkgsettings.setcpv(pkg)
8733                                 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
8734                                 pkg.metadata['CHOST'] = pkgsettings.get('CHOST', '')
8735                         self._pkg_cache[pkg] = pkg
8736
8737                         root_config = self.roots[pkg.root]
8738                         if "merge" == pkg.operation and \
8739                                 not visible(root_config.settings, pkg):
8740                                 if skip_masked:
8741                                         masked_tasks.append(Dependency(root=pkg.root, parent=pkg))
8742                                 else:
8743                                         self._unsatisfied_deps_for_display.append(
8744                                                 ((pkg.root, "="+pkg.cpv), {"myparent":None}))
8745
8746                         fakedb[myroot].cpv_inject(pkg)
8747                         serialized_tasks.append(pkg)
8748                         self.spinner.update()
8749
8750                 if self._unsatisfied_deps_for_display:
8751                         return False
8752
8753                 if not serialized_tasks or "--nodeps" in self.myopts:
8754                         self._serialized_tasks_cache = serialized_tasks
8755                         self._scheduler_graph = self.digraph
8756                 else:
8757                         self._select_package = self._select_pkg_from_graph
8758                         self.myparams.add("selective")
8759                         # Always traverse deep dependencies in order to account for
8760                         # potentially unsatisfied dependencies of installed packages.
8761                         # This is necessary for correct --keep-going or --resume operation
8762                         # in case a package from a group of circularly dependent packages
8763                         # fails. In this case, a package which has recently been installed
8764                         # may have an unsatisfied circular dependency (pulled in by
8765                         # PDEPEND, for example). So, even though a package is already
8766                         # installed, it may not have all of its dependencies satisfied, so
8767                         # it may not be usable. If such a package is in the subgraph of
8768                         # deep dependencies of a scheduled build, that build needs to
8769                         # be cancelled. In order for this type of situation to be
8770                         # recognized, deep traversal of dependencies is required.
8771                         self.myparams.add("deep")
8772
8773                         favorites = resume_data.get("favorites")
8774                         args_set = self._sets["args"]
8775                         if isinstance(favorites, list):
8776                                 args = self._load_favorites(favorites)
8777                         else:
8778                                 args = []
8779
8780                         for task in serialized_tasks:
8781                                 if isinstance(task, Package) and \
8782                                         task.operation == "merge":
8783                                         if not self._add_pkg(task, None):
8784                                                 return False
8785
8786                         # Packages for argument atoms need to be explicitly
8787                         # added via _add_pkg() so that they are included in the
8788                         # digraph (needed at least for --tree display).
8789                         for arg in args:
8790                                 for atom in arg.set:
8791                                         pkg, existing_node = self._select_package(
8792                                                 arg.root_config.root, atom)
8793                                         if existing_node is None and \
8794                                                 pkg is not None:
8795                                                 if not self._add_pkg(pkg, Dependency(atom=atom,
8796                                                         root=pkg.root, parent=arg)):
8797                                                         return False
8798
8799                         # Allow unsatisfied deps here to avoid showing a masking
8800                         # message for an unsatisfied dep that isn't necessarily
8801                         # masked.
8802                         if not self._create_graph(allow_unsatisfied=True):
8803                                 return False
8804
8805                         unsatisfied_deps = []
8806                         for dep in self._unsatisfied_deps:
8807                                 if not isinstance(dep.parent, Package):
8808                                         continue
8809                                 if dep.parent.operation == "merge":
8810                                         unsatisfied_deps.append(dep)
8811                                         continue
8812
8813                                 # For unsatisfied deps of installed packages, only account for
8814                                 # them if they are in the subgraph of dependencies of a package
8815                                 # which is scheduled to be installed.
8816                                 unsatisfied_install = False
8817                                 traversed = set()
8818                                 dep_stack = self.digraph.parent_nodes(dep.parent)
8819                                 while dep_stack:
8820                                         node = dep_stack.pop()
8821                                         if not isinstance(node, Package):
8822                                                 continue
8823                                         if node.operation == "merge":
8824                                                 unsatisfied_install = True
8825                                                 break
8826                                         if node in traversed:
8827                                                 continue
8828                                         traversed.add(node)
8829                                         dep_stack.extend(self.digraph.parent_nodes(node))
8830
8831                                 if unsatisfied_install:
8832                                         unsatisfied_deps.append(dep)
8833
8834                         if masked_tasks or unsatisfied_deps:
8835                                 # This probably means that a required package
8836                                 # was dropped via --skipfirst. It makes the
8837                                 # resume list invalid, so convert it to a
8838                                 # UnsatisfiedResumeDep exception.
8839                                 raise self.UnsatisfiedResumeDep(self,
8840                                         masked_tasks + unsatisfied_deps)
8841                         self._serialized_tasks_cache = None
8842                         try:
8843                                 self.altlist()
8844                         except self._unknown_internal_error:
8845                                 return False
8846
8847                 return True
8848
8849         def _load_favorites(self, favorites):
8850                 """
8851                 Use a list of favorites to resume state from a
8852                 previous select_files() call. This creates similar
8853                 DependencyArg instances to those that would have
8854                 been created by the original select_files() call.
8855                 This allows Package instances to be matched with
8856                 DependencyArg instances during graph creation.
8857                 """
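                 # Illustrative mapping (the atom below is hypothetical):
                 #   "world"           -> SetArg for the expanded world set
                 #                        (referenced as SETPREFIX + "world")
                 #   "app-editors/vim" -> AtomArg wrapping that atom
                 # Strings that are neither valid atoms nor known sets are skipped.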
8858                 root_config = self.roots[self.target_root]
8859                 getSetAtoms = root_config.setconfig.getSetAtoms
8860                 sets = root_config.sets
8861                 args = []
8862                 for x in favorites:
8863                         if not isinstance(x, basestring):
8864                                 continue
8865                         if x in ("system", "world"):
8866                                 x = SETPREFIX + x
8867                         if x.startswith(SETPREFIX):
8868                                 s = x[len(SETPREFIX):]
8869                                 if s not in sets:
8870                                         continue
8871                                 if s in self._sets:
8872                                         continue
8873                                 # Recursively expand sets so that containment tests in
8874                                 # self._get_parent_sets() properly match atoms in nested
8875                                 # sets (like if world contains system).
8876                                 expanded_set = InternalPackageSet(
8877                                         initial_atoms=getSetAtoms(s))
8878                                 self._sets[s] = expanded_set
8879                                 args.append(SetArg(arg=x, set=expanded_set,
8880                                         root_config=root_config))
8881                         else:
8882                                 if not portage.isvalidatom(x):
8883                                         continue
8884                                 args.append(AtomArg(arg=x, atom=x,
8885                                         root_config=root_config))
8886
8887                 self._set_args(args)
8888                 return args
8889
8890         class UnsatisfiedResumeDep(portage.exception.PortageException):
8891                 """
8892                 A dependency of a resume list is not installed. This
8893                 can occur when a required package is dropped from the
8894                 merge list via --skipfirst.
8895                 """
8896                 def __init__(self, depgraph, value):
8897                         portage.exception.PortageException.__init__(self, value)
8898                         self.depgraph = depgraph
8899
8900         class _internal_exception(portage.exception.PortageException):
8901                 def __init__(self, value=""):
8902                         portage.exception.PortageException.__init__(self, value)
8903
8904         class _unknown_internal_error(_internal_exception):
8905                 """
8906                 Used by the depgraph internally to terminate graph creation.
8907                 The specific reason for the failure should have been dumped
8908                 to stderr; unfortunately, the exact reason for the failure
8909                 may not be known.
8910                 """
8911
8912         class _serialize_tasks_retry(_internal_exception):
8913                 """
8914                 This is raised by the _serialize_tasks() method when it needs to
8915                 be called again for some reason. The only case that it's currently
8916                 used for is when neglected dependencies need to be added to the
8917                 graph in order to avoid making a potentially unsafe decision.
8918                 """
8919
8920         class _dep_check_composite_db(portage.dbapi):
8921                 """
8922                 A dbapi-like interface that is optimized for use in dep_check() calls.
8923                 This is built on top of the existing depgraph package selection logic.
8924                 Some packages that have been added to the graph may be masked from this
8925                 view in order to influence the atom preference selection that occurs
8926                 via dep_check().
8927                 """
8928                 def __init__(self, depgraph, root):
8929                         portage.dbapi.__init__(self)
8930                         self._depgraph = depgraph
8931                         self._root = root
8932                         self._match_cache = {}
8933                         self._cpv_pkg_map = {}
8934
8935                 def _clear_cache(self):
8936                         self._match_cache.clear()
8937                         self._cpv_pkg_map.clear()
8938
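                 # match() returns the cpv of the package selected by the depgraph
                 # for this atom, plus visible packages from any other matching
                 # slots, sorted ascending; results are cached per atom.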
8939                 def match(self, atom):
8940                         ret = self._match_cache.get(atom)
8941                         if ret is not None:
8942                                 return ret[:]
8943                         orig_atom = atom
8944                         if "/" not in atom:
8945                                 atom = self._dep_expand(atom)
8946                         pkg, existing = self._depgraph._select_package(self._root, atom)
8947                         if not pkg:
8948                                 ret = []
8949                         else:
8950                                 # Return the highest available from select_package() as well as
8951                                 # any matching slots in the graph db.
8952                                 slots = set()
8953                                 slots.add(pkg.metadata["SLOT"])
8954                                 atom_cp = portage.dep_getkey(atom)
8955                                 if pkg.cp.startswith("virtual/"):
8956                                         # For new-style virtual lookahead that occurs inside
8957                                         # dep_check(), examine all slots. This is needed
8958                                         # so that newer slots will not unnecessarily be pulled in
8959                                         # when a satisfying lower slot is already installed. For
8960                                         # example, if virtual/jdk-1.4 is satisfied via kaffe then
8961                                         # there's no need to pull in a newer slot to satisfy a
8962                                         # virtual/jdk dependency.
8963                                         for db, pkg_type, built, installed, db_keys in \
8964                                                 self._depgraph._filtered_trees[self._root]["dbs"]:
8965                                                 for cpv in db.match(atom):
8966                                                         if portage.cpv_getkey(cpv) != pkg.cp:
8967                                                                 continue
8968                                                         slots.add(db.aux_get(cpv, ["SLOT"])[0])
8969                                 ret = []
8970                                 if self._visible(pkg):
8971                                         self._cpv_pkg_map[pkg.cpv] = pkg
8972                                         ret.append(pkg.cpv)
8973                                 slots.remove(pkg.metadata["SLOT"])
8974                                 while slots:
8975                                         slot_atom = "%s:%s" % (atom_cp, slots.pop())
8976                                         pkg, existing = self._depgraph._select_package(
8977                                                 self._root, slot_atom)
8978                                         if not pkg:
8979                                                 continue
8980                                         if not self._visible(pkg):
8981                                                 continue
8982                                         self._cpv_pkg_map[pkg.cpv] = pkg
8983                                         ret.append(pkg.cpv)
8984                                 if ret:
8985                                         self._cpv_sort_ascending(ret)
8986                         self._match_cache[orig_atom] = ret
8987                         return ret[:]
8988
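                 # _visible() hides packages from dep_check()'s view: installed
                 # packages matched by an argument atom when not in "selective"
                 # mode, installed packages that are no longer visible, and any
                 # package that is not the chosen package for its slot.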
8989                 def _visible(self, pkg):
8990                         if pkg.installed and "selective" not in self._depgraph.myparams:
8991                                 try:
8992                                         arg = self._depgraph._iter_atoms_for_pkg(pkg).next()
8993                                 except (StopIteration, portage.exception.InvalidDependString):
8994                                         arg = None
8995                                 if arg:
8996                                         return False
8997                         if pkg.installed:
8998                                 try:
8999                                         if not visible(
9000                                                 self._depgraph.pkgsettings[pkg.root], pkg):
9001                                                 return False
9002                                 except portage.exception.InvalidDependString:
9003                                         pass
9004                         in_graph = self._depgraph._slot_pkg_map[
9005                                 self._root].get(pkg.slot_atom)
9006                         if in_graph is None:
9007                                 # Mask choices for packages which are not the highest visible
9008                                 # version within their slot (since they usually trigger slot
9009                                 # conflicts).
9010                                 highest_visible, in_graph = self._depgraph._select_package(
9011                                         self._root, pkg.slot_atom)
9012                                 if pkg != highest_visible:
9013                                         return False
9014                         elif in_graph != pkg:
9015                                 # Mask choices for packages that would trigger a slot
9016                                 # conflict with a previously selected package.
9017                                 return False
9018                         return True
9019
9020                 def _dep_expand(self, atom):
9021                         """
9022                         This is only needed for old installed packages that may
9023                         contain atoms that are not fully qualified with a specific
9024                         category. Emulate the cpv_expand() function that's used by
9025                         dbapi.match() in cases like this. If there are multiple
9026                         matches, it's often due to a new-style virtual that has
9027                         been added, so try to filter those out to avoid raising
9028                         a ValueError.
9029                         """
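                        # Illustrative example (category names are hypothetical): a bare
                        # atom like "foo" may expand to "dev-libs/foo" when exactly one
                        # category matches; otherwise the fallbacks below pick either the
                        # "virtual" or the "null" category.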
9030                         root_config = self._depgraph.roots[self._root]
9031                         orig_atom = atom
9032                         expanded_atoms = self._depgraph._dep_expand(root_config, atom)
9033                         if len(expanded_atoms) > 1:
9034                                 non_virtual_atoms = []
9035                                 for x in expanded_atoms:
9036                                         if not portage.dep_getkey(x).startswith("virtual/"):
9037                                                 non_virtual_atoms.append(x)
9038                                 if len(non_virtual_atoms) == 1:
9039                                         expanded_atoms = non_virtual_atoms
9040                         if len(expanded_atoms) > 1:
9041                                 # compatible with portage.cpv_expand()
9042                                 raise portage.exception.AmbiguousPackageName(
9043                                         [portage.dep_getkey(x) for x in expanded_atoms])
9044                         if expanded_atoms:
9045                                 atom = expanded_atoms[0]
9046                         else:
9047                                 null_atom = insert_category_into_atom(atom, "null")
9048                                 null_cp = portage.dep_getkey(null_atom)
9049                                 cat, atom_pn = portage.catsplit(null_cp)
9050                                 virts_p = root_config.settings.get_virts_p().get(atom_pn)
9051                                 if virts_p:
9052                                         # Allow the resolver to choose which virtual.
9053                                         atom = insert_category_into_atom(atom, "virtual")
9054                                 else:
9055                                         atom = insert_category_into_atom(atom, "null")
9056                         return atom
9057
9058                 def aux_get(self, cpv, wants):
9059                         metadata = self._cpv_pkg_map[cpv].metadata
9060                         return [metadata.get(x, "") for x in wants]
9061
9062 class RepoDisplay(object):
9063         def __init__(self, roots):
9064                 self._shown_repos = {}
9065                 self._unknown_repo = False
9066                 repo_paths = set()
9067                 for root_config in roots.itervalues():
9068                         portdir = root_config.settings.get("PORTDIR")
9069                         if portdir:
9070                                 repo_paths.add(portdir)
9071                         overlays = root_config.settings.get("PORTDIR_OVERLAY")
9072                         if overlays:
9073                                 repo_paths.update(overlays.split())
9074                 repo_paths = list(repo_paths)
9075                 self._repo_paths = repo_paths
9076                 self._repo_paths_real = [ os.path.realpath(repo_path) \
9077                         for repo_path in repo_paths ]
9078
9079                 # pre-allocate index for PORTDIR so that it always has index 0.
9080                 for root_config in roots.itervalues():
9081                         portdb = root_config.trees["porttree"].dbapi
9082                         portdir = portdb.porttree_root
9083                         if portdir:
9084                                 self.repoStr(portdir)
9085
9086         def repoStr(self, repo_path_real):
9087                 real_index = -1
9088                 if repo_path_real in self._repo_paths_real:
9089                         real_index = self._repo_paths_real.index(repo_path_real)
9090                 if real_index == -1:
9091                         s = "?"
9092                         self._unknown_repo = True
9093                 else:
9094                         shown_repos = self._shown_repos
9095                         repo_paths = self._repo_paths
9096                         repo_path = repo_paths[real_index]
9097                         index = shown_repos.get(repo_path)
9098                         if index is None:
9099                                 index = len(shown_repos)
9100                                 shown_repos[repo_path] = index
9101                         s = str(index)
9102                 return s
9103
9104         def __str__(self):
9105                 output = []
9106                 shown_repos = self._shown_repos
9107                 unknown_repo = self._unknown_repo
9108                 if shown_repos or unknown_repo:
9109                         output.append("Portage tree and overlays:\n")
9110                 show_repo_paths = list(shown_repos)
9111                 for repo_path, repo_index in shown_repos.iteritems():
9112                         show_repo_paths[repo_index] = repo_path
9113                 if show_repo_paths:
9114                         for index, repo_path in enumerate(show_repo_paths):
9115                                 output.append(" "+teal("["+str(index)+"]")+" %s\n" % repo_path)
9116                 if unknown_repo:
9117                         output.append(" "+teal("[?]") + \
9118                                 " indicates that the source repository could not be determined\n")
9119                 return "".join(output)
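        # Illustrative output (repository paths are examples only):
        #
        #   Portage tree and overlays:
        #    [0] /usr/portage
        #    [1] /usr/local/portage
        #    [?] indicates that the source repository could not be determined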
9120
9121 class PackageCounters(object):
9122
9123         def __init__(self):
9124                 self.upgrades   = 0
9125                 self.downgrades = 0
9126                 self.new        = 0
9127                 self.newslot    = 0
9128                 self.reinst     = 0
9129                 self.uninst     = 0
9130                 self.blocks     = 0
9131                 self.blocks_satisfied         = 0
9132                 self.totalsize  = 0
9133                 self.restrict_fetch           = 0
9134                 self.restrict_fetch_satisfied = 0
9135                 self.interactive              = 0
9136
9137         def __str__(self):
9138                 total_installs = self.upgrades + self.downgrades + self.newslot + self.new + self.reinst
9139                 myoutput = []
9140                 details = []
9141                 myoutput.append("Total: %s package" % total_installs)
9142                 if total_installs != 1:
9143                         myoutput.append("s")
9144                 if total_installs != 0:
9145                         myoutput.append(" (")
9146                 if self.upgrades > 0:
9147                         details.append("%s upgrade" % self.upgrades)
9148                         if self.upgrades > 1:
9149                                 details[-1] += "s"
9150                 if self.downgrades > 0:
9151                         details.append("%s downgrade" % self.downgrades)
9152                         if self.downgrades > 1:
9153                                 details[-1] += "s"
9154                 if self.new > 0:
9155                         details.append("%s new" % self.new)
9156                 if self.newslot > 0:
9157                         details.append("%s in new slot" % self.newslot)
9158                         if self.newslot > 1:
9159                                 details[-1] += "s"
9160                 if self.reinst > 0:
9161                         details.append("%s reinstall" % self.reinst)
9162                         if self.reinst > 1:
9163                                 details[-1] += "s"
9164                 if self.uninst > 0:
9165                         details.append("%s uninstall" % self.uninst)
9166                         if self.uninst > 1:
9167                                 details[-1] += "s"
9168                 if self.interactive > 0:
9169                         details.append("%s %s" % (self.interactive,
9170                                 colorize("WARN", "interactive")))
9171                 myoutput.append(", ".join(details))
9172                 if total_installs != 0:
9173                         myoutput.append(")")
9174                 myoutput.append(", Size of downloads: %s" % format_size(self.totalsize))
9175                 if self.restrict_fetch:
9176                         myoutput.append("\nFetch Restriction: %s package" % \
9177                                 self.restrict_fetch)
9178                         if self.restrict_fetch > 1:
9179                                 myoutput.append("s")
9180                 if self.restrict_fetch_satisfied < self.restrict_fetch:
9181                         myoutput.append(bad(" (%s unsatisfied)") % \
9182                                 (self.restrict_fetch - self.restrict_fetch_satisfied))
9183                 if self.blocks > 0:
9184                         myoutput.append("\nConflict: %s block" % \
9185                                 self.blocks)
9186                         if self.blocks > 1:
9187                                 myoutput.append("s")
9188                         if self.blocks_satisfied < self.blocks:
9189                                 myoutput.append(bad(" (%s unsatisfied)") % \
9190                                         (self.blocks - self.blocks_satisfied))
9191                 return "".join(myoutput)
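        # Illustrative summary line (all numbers are examples only):
        #
        #   Total: 5 packages (2 upgrades, 1 new, 2 reinstalls), Size of downloads: 43,075 kB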
9192
9193 class PollSelectAdapter(PollConstants):
9194
9195         """
9196         Use select to emulate a poll object, for
9197         systems that don't support poll().
9198         """
9199
9200         def __init__(self):
9201                 self._registered = {}
9202                 self._select_args = [[], [], []]
9203
9204         def register(self, fd, *args):
9205                 """
9206                 Only POLLIN is currently supported!
9207                 """
9208                 if len(args) > 1:
9209                         raise TypeError(
9210                                 "register expected at most 2 arguments, got " + \
9211                                 repr(1 + len(args)))
9212
9213                 eventmask = PollConstants.POLLIN | \
9214                         PollConstants.POLLPRI | PollConstants.POLLOUT
9215                 if args:
9216                         eventmask = args[0]
9217
9218                 self._registered[fd] = eventmask
9219                 self._select_args = None
9220
9221         def unregister(self, fd):
9222                 self._select_args = None
9223                 del self._registered[fd]
9224
9225         def poll(self, *args):
9226                 if len(args) > 1:
9227                         raise TypeError(
9228                                 "poll expected at most 2 arguments, got " + \
9229                                 repr(1 + len(args)))
9230
9231                 timeout = None
9232                 if args:
9233                         timeout = args[0]
9234
9235                 select_args = self._select_args
9236                 if select_args is None:
9237                         select_args = [self._registered.keys(), [], []]
9238
9239                 if timeout is not None:
9240                         select_args = select_args[:]
9241                         # Translate poll() timeout args to select() timeout args:
9242                         #
9243                         #          | units        | value(s) for indefinite block
9244                         # ---------|--------------|------------------------------
9245                         #   poll   | milliseconds | omitted, negative, or None
9246                         # ---------|--------------|------------------------------
9247                         #   select | seconds      | omitted
9248                         # ---------|--------------|------------------------------
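                        #
                        # For example, a poll() timeout of 500 (milliseconds)
                        # is passed to select() as 0.5 (seconds).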
9249
9250                         if timeout < 0:
9251                                 timeout = None
9252                         if timeout is not None:
9253                                 select_args.append(timeout / 1000.0)
9254
9255                 select_events = select.select(*select_args)
9256                 poll_events = []
9257                 for fd in select_events[0]:
9258                         poll_events.append((fd, PollConstants.POLLIN))
9259                 return poll_events
9260
9261 class SequentialTaskQueue(SlotObject):
9262
9263         __slots__ = ("max_jobs", "running_tasks") + \
9264                 ("_dirty", "_scheduling", "_task_queue")
9265
9266         def __init__(self, **kwargs):
9267                 SlotObject.__init__(self, **kwargs)
9268                 self._task_queue = deque()
9269                 self.running_tasks = set()
9270                 if self.max_jobs is None:
9271                         self.max_jobs = 1
9272                 self._dirty = True
9273
9274         def add(self, task):
9275                 self._task_queue.append(task)
9276                 self._dirty = True
9277
9278         def addFront(self, task):
9279                 self._task_queue.appendleft(task)
9280                 self._dirty = True
9281
9282         def schedule(self):
9283
9284                 if not self._dirty:
9285                         return False
9286
9287                 if not self:
9288                         return False
9289
9290                 if self._scheduling:
9291                         # Ignore any recursive schedule() calls triggered via
9292                         # self._task_exit().
9293                         return False
9294
9295                 self._scheduling = True
9296
9297                 task_queue = self._task_queue
9298                 running_tasks = self.running_tasks
9299                 max_jobs = self.max_jobs
9300                 state_changed = False
9301
9302                 while task_queue and \
9303                         (max_jobs is True or len(running_tasks) < max_jobs):
9304                         task = task_queue.popleft()
9305                         cancelled = getattr(task, "cancelled", None)
9306                         if not cancelled:
9307                                 running_tasks.add(task)
9308                                 task.addExitListener(self._task_exit)
9309                                 task.start()
9310                         state_changed = True
9311
9312                 self._dirty = False
9313                 self._scheduling = False
9314
9315                 return state_changed
9316
9317         def _task_exit(self, task):
9318                 """
9319                 Since we can always rely on exit listeners being called, the set of
9320                 running tasks is always pruned automatically and there is never any need
9321                 to actively prune it.
9322                 """
9323                 self.running_tasks.remove(task)
9324                 if self._task_queue:
9325                         self._dirty = True
9326
9327         def clear(self):
9328                 self._task_queue.clear()
9329                 running_tasks = self.running_tasks
9330                 while running_tasks:
9331                         task = running_tasks.pop()
9332                         task.removeExitListener(self._task_exit)
9333                         task.cancel()
9334                 self._dirty = False
9335
9336         def __nonzero__(self):
9337                 return bool(self._task_queue or self.running_tasks)
9338
9339         def __len__(self):
9340                 return len(self._task_queue) + len(self.running_tasks)
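# Minimal usage sketch for SequentialTaskQueue ("some_task" is hypothetical and
# must provide the start()/addExitListener()/cancel() interface used above):
#
#   queue = SequentialTaskQueue(max_jobs=2)
#   queue.add(some_task)
#   queue.schedule()   # starts up to max_jobs queued tasks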
9341
9342 _can_poll_device = None
9343
9344 def can_poll_device():
9345         """
9346         Test if it's possible to use poll() on a device such as a pty. This
9347         is known to fail on Darwin.
9348         @rtype: bool
9349         @returns: True if poll() on a device succeeds, False otherwise.
9350         """
9351
9352         global _can_poll_device
9353         if _can_poll_device is not None:
9354                 return _can_poll_device
9355
9356         if not hasattr(select, "poll"):
9357                 _can_poll_device = False
9358                 return _can_poll_device
9359
9360         try:
9361                 dev_null = open('/dev/null', 'rb')
9362         except IOError:
9363                 _can_poll_device = False
9364                 return _can_poll_device
9365
9366         p = select.poll()
9367         p.register(dev_null.fileno(), PollConstants.POLLIN)
9368
9369         invalid_request = False
9370         for f, event in p.poll():
9371                 if event & PollConstants.POLLNVAL:
9372                         invalid_request = True
9373                         break
9374         dev_null.close()
9375
9376         _can_poll_device = not invalid_request
9377         return _can_poll_device
9378
9379 def create_poll_instance():
9380         """
9381         Create an instance of select.poll, or an instance of
9382         PollSelectAdapter there is no poll() implementation or
9383         PollSelectAdapter if there is no poll() implementation or
9384         """
9385         if can_poll_device():
9386                 return select.poll()
9387         return PollSelectAdapter()
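# Example ("fd" is a hypothetical file descriptor); both select.poll and
# PollSelectAdapter expose the same register()/poll() interface:
#
#   poll_obj = create_poll_instance()
#   poll_obj.register(fd, PollConstants.POLLIN)
#   events = poll_obj.poll(0)   # non-blocking poll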
9388
9389 getloadavg = getattr(os, "getloadavg", None)
9390 if getloadavg is None:
9391         def getloadavg():
9392                 """
9393                 Uses /proc/loadavg to emulate os.getloadavg().
9394                 Raises OSError if the load average was unobtainable.
9395                 """
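                # A typical /proc/loadavg line looks like:
                #
                #   0.09 0.14 0.10 1/523 12345
                #
                # Only the first three fields (the 1, 5 and 15 minute load
                # averages) are used here.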
9396                 try:
9397                         loadavg_str = open('/proc/loadavg').readline()
9398                 except IOError:
9399                         # getloadavg() is only supposed to raise OSError, so convert
9400                         raise OSError('unknown')
9401                 loadavg_split = loadavg_str.split()
9402                 if len(loadavg_split) < 3:
9403                         raise OSError('unknown')
9404                 loadavg_floats = []
9405                 for i in xrange(3):
9406                         try:
9407                                 loadavg_floats.append(float(loadavg_split[i]))
9408                         except ValueError:
9409                                 raise OSError('unknown')
9410                 return tuple(loadavg_floats)
9411
9412 class PollScheduler(object):
9413
9414         class _sched_iface_class(SlotObject):
9415                 __slots__ = ("register", "schedule", "unregister")
9416
9417         def __init__(self):
9418                 self._max_jobs = 1
9419                 self._max_load = None
9420                 self._jobs = 0
9421                 self._poll_event_queue = []
9422                 self._poll_event_handlers = {}
9423                 self._poll_event_handler_ids = {}
9424                 # Increment id for each new handler.
9425                 self._event_handler_id = 0
9426                 self._poll_obj = create_poll_instance()
9427                 self._scheduling = False
9428
9429         def _schedule(self):
9430                 """
9431                 Calls _schedule_tasks() and automatically returns early from
9432                 any recursive calls to this method that the _schedule_tasks()
9433                 call might trigger. This makes _schedule() safe to call from
9434                 inside exit listeners.
9435                 """
9436                 if self._scheduling:
9437                         return False
9438                 self._scheduling = True
9439                 try:
9440                         return self._schedule_tasks()
9441                 finally:
9442                         self._scheduling = False
9443
9444         def _running_job_count(self):
9445                 return self._jobs
9446
9447         def _can_add_job(self):
9448                 max_jobs = self._max_jobs
9449                 max_load = self._max_load
9450
9451                 if max_jobs is not True and \
9452                         self._running_job_count() >= max_jobs:
9453                         return False
9454
9455                 if max_load is not None and \
9456                         (max_jobs is True or max_jobs > 1) and \
9457                         self._running_job_count() >= 1:
9458                         try:
9459                                 avg1, avg5, avg15 = getloadavg()
9460                         except OSError:
9461                                 return False
9462
9463                         if avg1 >= max_load:
9464                                 return False
9465
9466                 return True
9467
9468         def _poll(self, timeout=None):
9469                 """
9470                 All poll() calls pass through here. The poll events
9471                 are added directly to self._poll_event_queue.
9472                 In order to avoid endless blocking, this raises
9473                 StopIteration if timeout is None and there are
9474                 no file descriptors to poll.
9475                 """
9476                 if not self._poll_event_handlers:
9477                         self._schedule()
9478                         if timeout is None and \
9479                                 not self._poll_event_handlers:
9480                                 raise StopIteration(
9481                                         "timeout is None and there are no poll() event handlers")
9482
9483                 # The following error is known to occur with Linux kernel versions
9484                 # less than 2.6.24:
9485                 #
9486                 #   select.error: (4, 'Interrupted system call')
9487                 #
9488                 # This error has been observed after a SIGSTOP, followed by SIGCONT.
9489                 # Treat it similar to EAGAIN if timeout is None, otherwise just return
9490                 # without any events.
9491                 while True:
9492                         try:
9493                                 self._poll_event_queue.extend(self._poll_obj.poll(timeout))
9494                                 break
9495                         except select.error, e:
9496                                 writemsg_level("\n!!! select error: %s\n" % (e,),
9497                                         level=logging.ERROR, noiselevel=-1)
9498                                 del e
9499                                 if timeout is not None:
9500                                         break
9501
9502         def _next_poll_event(self, timeout=None):
9503                 """
9504                 Since the _schedule_wait() loop is called by event
9505                 handlers from _poll_loop(), maintain a central event
9506                 queue for both of them to share events from a single
9507                 poll() call. In order to avoid endless blocking, this
9508                 raises StopIteration if timeout is None and there are
9509                 no file descriptors to poll.
9510                 """
9511                 if not self._poll_event_queue:
9512                         self._poll(timeout)
9513                 return self._poll_event_queue.pop()
9514
9515         def _poll_loop(self):
9516
9517                 event_handlers = self._poll_event_handlers
9518                 event_handled = False
9519
9520                 try:
9521                         while event_handlers:
9522                                 f, event = self._next_poll_event()
9523                                 handler, reg_id = event_handlers[f]
9524                                 handler(f, event)
9525                                 event_handled = True
9526                 except StopIteration:
9527                         event_handled = True
9528
9529                 if not event_handled:
9530                         raise AssertionError("tight loop")
9531
9532         def _schedule_yield(self):
9533                 """
9534                 Schedule for a short period of time chosen by the scheduler based
9535                 on internal state. Synchronous tasks should call this periodically
9536                 in order to allow the scheduler to service pending poll events. The
9537                 scheduler will call poll() exactly once, without blocking, and any
9538                 resulting poll events will be serviced.
9539                 """
9540                 event_handlers = self._poll_event_handlers
9541                 events_handled = 0
9542
9543                 if not event_handlers:
9544                         return bool(events_handled)
9545
9546                 if not self._poll_event_queue:
9547                         self._poll(0)
9548
9549                 try:
9550                         while event_handlers and self._poll_event_queue:
9551                                 f, event = self._next_poll_event()
9552                                 handler, reg_id = event_handlers[f]
9553                                 handler(f, event)
9554                                 events_handled += 1
9555                 except StopIteration:
9556                         events_handled += 1
9557
9558                 return bool(events_handled)
9559
9560         def _register(self, f, eventmask, handler):
9561                 """
9562                 @rtype: Integer
9563                 @return: A unique registration id, for use in schedule() or
9564                         unregister() calls.
9565                 """
9566                 if f in self._poll_event_handlers:
9567                         raise AssertionError("fd %d is already registered" % f)
9568                 self._event_handler_id += 1
9569                 reg_id = self._event_handler_id
9570                 self._poll_event_handler_ids[reg_id] = f
9571                 self._poll_event_handlers[f] = (handler, reg_id)
9572                 self._poll_obj.register(f, eventmask)
9573                 return reg_id
9574
9575         def _unregister(self, reg_id):
9576                 f = self._poll_event_handler_ids[reg_id]
9577                 self._poll_obj.unregister(f)
9578                 del self._poll_event_handlers[f]
9579                 del self._poll_event_handler_ids[reg_id]
9580
9581         def _schedule_wait(self, wait_ids):
9582                 """
9583                 Schedule until none of the given wait_ids are
9584                 registered for poll() events any longer.
9585                 @type wait_ids: int or collection of ints
9586                 @param wait_ids: registration id(s) to wait for
9587                 """
9588                 event_handlers = self._poll_event_handlers
9589                 handler_ids = self._poll_event_handler_ids
9590                 event_handled = False
9591
9592                 if isinstance(wait_ids, int):
9593                         wait_ids = frozenset([wait_ids])
9594
9595                 try:
9596                         while wait_ids.intersection(handler_ids):
9597                                 f, event = self._next_poll_event()
9598                                 handler, reg_id = event_handlers[f]
9599                                 handler(f, event)
9600                                 event_handled = True
9601                 except StopIteration:
9602                         event_handled = True
9603
9604                 return event_handled
9605
9606 class QueueScheduler(PollScheduler):
9607
9608         """
9609         Add instances of SequentialTaskQueue and then call run(). The
9610         run() method returns when no tasks remain.
9611         """
9612
9613         def __init__(self, max_jobs=None, max_load=None):
9614                 PollScheduler.__init__(self)
9615
9616                 if max_jobs is None:
9617                         max_jobs = 1
9618
9619                 self._max_jobs = max_jobs
9620                 self._max_load = max_load
9621                 self.sched_iface = self._sched_iface_class(
9622                         register=self._register,
9623                         schedule=self._schedule_wait,
9624                         unregister=self._unregister)
9625
9626                 self._queues = []
9627                 self._schedule_listeners = []
9628
9629         def add(self, q):
9630                 self._queues.append(q)
9631
9632         def remove(self, q):
9633                 self._queues.remove(q)
9634
9635         def run(self):
9636
9637                 while self._schedule():
9638                         self._poll_loop()
9639
9640                 while self._running_job_count():
9641                         self._poll_loop()
9642
9643         def _schedule_tasks(self):
9644                 """
9645                 @rtype: bool
9646                 @returns: True if there may be remaining tasks to schedule,
9647                         False otherwise.
9648                 """
9649                 while self._can_add_job():
9650                         n = self._max_jobs - self._running_job_count()
9651                         if n < 1:
9652                                 break
9653
9654                         if not self._start_next_job(n):
9655                                 return False
9656
9657                 for q in self._queues:
9658                         if q:
9659                                 return True
9660                 return False
9661
9662         def _running_job_count(self):
9663                 job_count = 0
9664                 for q in self._queues:
9665                         job_count += len(q.running_tasks)
9666                 self._jobs = job_count
9667                 return job_count
9668
9669         def _start_next_job(self, n=1):
9670                 started_count = 0
9671                 for q in self._queues:
9672                         initial_job_count = len(q.running_tasks)
9673                         q.schedule()
9674                         final_job_count = len(q.running_tasks)
9675                         if final_job_count > initial_job_count:
9676                                 started_count += (final_job_count - initial_job_count)
9677                         if started_count >= n:
9678                                 break
9679                 return started_count
9680
9681 class TaskScheduler(object):
9682
9683         """
9684         A simple way to handle scheduling of AsynchronousTask instances. Simply
9685         add tasks and call run(). The run() method returns when no tasks remain.
9686         """
9687
9688         def __init__(self, max_jobs=None, max_load=None):
9689                 self._queue = SequentialTaskQueue(max_jobs=max_jobs)
9690                 self._scheduler = QueueScheduler(
9691                         max_jobs=max_jobs, max_load=max_load)
9692                 self.sched_iface = self._scheduler.sched_iface
9693                 self.run = self._scheduler.run
9694                 self._scheduler.add(self._queue)
9695
9696         def add(self, task):
9697                 self._queue.add(task)
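# Hedged usage sketch ("some_task" is hypothetical and must implement the
# asynchronous-task interface expected by SequentialTaskQueue, i.e. start(),
# addExitListener() and cancel()):
#
#   task_scheduler = TaskScheduler(max_jobs=2)
#   task_scheduler.add(some_task)
#   task_scheduler.run()   # returns when no tasks remain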
9698
9699 class JobStatusDisplay(object):
9700
9701         _bound_properties = ("curval", "failed", "running")
9702         _jobs_column_width = 48
9703
9704         # Don't update the display unless at least this much
9705         # time has passed, in units of seconds.
9706         _min_display_latency = 2
9707
9708         _default_term_codes = {
9709                 'cr'  : '\r',
9710                 'el'  : '\x1b[K',
9711                 'nel' : '\n',
9712         }
9713
9714         _termcap_name_map = {
9715                 'carriage_return' : 'cr',
9716                 'clr_eol'         : 'el',
9717                 'newline'         : 'nel',
9718         }
9719
9720         def __init__(self, out=sys.stdout, quiet=False):
9721                 object.__setattr__(self, "out", out)
9722                 object.__setattr__(self, "quiet", quiet)
9723                 object.__setattr__(self, "maxval", 0)
9724                 object.__setattr__(self, "merges", 0)
9725                 object.__setattr__(self, "_changed", False)
9726                 object.__setattr__(self, "_displayed", False)
9727                 object.__setattr__(self, "_last_display_time", 0)
9728                 object.__setattr__(self, "width", 80)
9729                 self.reset()
9730
9731                 isatty = hasattr(out, "isatty") and out.isatty()
9732                 object.__setattr__(self, "_isatty", isatty)
9733                 if not isatty or not self._init_term():
9734                         term_codes = {}
9735                         for k, capname in self._termcap_name_map.iteritems():
9736                                 term_codes[k] = self._default_term_codes[capname]
9737                         object.__setattr__(self, "_term_codes", term_codes)
9738                 encoding = sys.getdefaultencoding()
9739                 for k, v in self._term_codes.items():
9740                         if not isinstance(v, basestring):
9741                                 self._term_codes[k] = v.decode(encoding, 'replace')
9742
9743         def _init_term(self):
9744                 """
9745                 Initialize term control codes.
9746                 @rtype: bool
9747                 @returns: True if term codes were successfully initialized,
9748                         False otherwise.
9749                 """
9750
9751                 term_type = os.environ.get("TERM", "vt100")
9752                 tigetstr = None
9753
9754                 try:
9755                         import curses
9756                         try:
9757                                 curses.setupterm(term_type, self.out.fileno())
9758                                 tigetstr = curses.tigetstr
9759                         except curses.error:
9760                                 pass
9761                 except ImportError:
9762                         pass
9763
9764                 if tigetstr is None:
9765                         return False
9766
9767                 term_codes = {}
9768                 for k, capname in self._termcap_name_map.iteritems():
9769                         code = tigetstr(capname)
9770                         if code is None:
9771                                 code = self._default_term_codes[capname]
9772                         term_codes[k] = code
9773                 object.__setattr__(self, "_term_codes", term_codes)
9774                 return True
9775
9776         def _format_msg(self, msg):
9777                 return ">>> %s" % msg
9778
9779         def _erase(self):
9780                 self.out.write(
9781                         self._term_codes['carriage_return'] + \
9782                         self._term_codes['clr_eol'])
9783                 self.out.flush()
9784                 self._displayed = False
9785
9786         def _display(self, line):
9787                 self.out.write(line)
9788                 self.out.flush()
9789                 self._displayed = True
9790
9791         def _update(self, msg):
9792
9793                 out = self.out
9794                 if not self._isatty:
9795                         out.write(self._format_msg(msg) + self._term_codes['newline'])
9796                         self.out.flush()
9797                         self._displayed = True
9798                         return
9799
9800                 if self._displayed:
9801                         self._erase()
9802
9803                 self._display(self._format_msg(msg))
9804
9805         def displayMessage(self, msg):
9806
9807                 was_displayed = self._displayed
9808
9809                 if self._isatty and self._displayed:
9810                         self._erase()
9811
9812                 self.out.write(self._format_msg(msg) + self._term_codes['newline'])
9813                 self.out.flush()
9814                 self._displayed = False
9815
9816                 if was_displayed:
9817                         self._changed = True
9818                         self.display()
9819
9820         def reset(self):
9821                 self.maxval = 0
9822                 self.merges = 0
9823                 for name in self._bound_properties:
9824                         object.__setattr__(self, name, 0)
9825
9826                 if self._displayed:
9827                         self.out.write(self._term_codes['newline'])
9828                         self.out.flush()
9829                         self._displayed = False
9830
9831         def __setattr__(self, name, value):
9832                 old_value = getattr(self, name)
9833                 if value == old_value:
9834                         return
9835                 object.__setattr__(self, name, value)
9836                 if name in self._bound_properties:
9837                         self._property_change(name, old_value, value)
9838
9839         def _property_change(self, name, old_value, new_value):
9840                 self._changed = True
9841                 self.display()
9842
9843         def _load_avg_str(self):
9844                 try:
9845                         avg = getloadavg()
9846                 except OSError:
9847                         return 'unknown'
9848
9849                 max_avg = max(avg)
9850
9851                 if max_avg < 10:
9852                         digits = 2
9853                 elif max_avg < 100:
9854                         digits = 1
9855                 else:
9856                         digits = 0
9857
9858                 return ", ".join(("%%.%df" % digits ) % x for x in avg)
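                # Illustrative results: (0.53, 1.24, 0.87) formats as
                # "0.53, 1.24, 0.87", while larger averages get fewer decimal
                # places, e.g. (112.31, 98.02, 64.57) formats as "112, 98, 65".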
9859
9860         def display(self):
9861                 """
9862                 Display status on stdout, but only if something has
9863                 changed since the last call.
9864                 """
9865
9866                 if self.quiet:
9867                         return
9868
9869                 current_time = time.time()
9870                 time_delta = current_time - self._last_display_time
9871                 if self._displayed and \
9872                         not self._changed:
9873                         if not self._isatty:
9874                                 return
9875                         if time_delta < self._min_display_latency:
9876                                 return
9877
9878                 self._last_display_time = current_time
9879                 self._changed = False
9880                 self._display_status()
9881
9882         def _display_status(self):
9883                 # Don't use len(self._completed_tasks) here since that also
9884                 # can include uninstall tasks.
9885                 curval_str = str(self.curval)
9886                 maxval_str = str(self.maxval)
9887                 running_str = str(self.running)
9888                 failed_str = str(self.failed)
9889                 load_avg_str = self._load_avg_str()
9890
9891                 color_output = StringIO()
9892                 plain_output = StringIO()
9893                 style_file = portage.output.ConsoleStyleFile(color_output)
9894                 style_file.write_listener = plain_output
9895                 style_writer = portage.output.StyleWriter(file=style_file, maxcol=9999)
9896                 style_writer.style_listener = style_file.new_styles
9897                 f = formatter.AbstractFormatter(style_writer)
9898
9899                 number_style = "INFORM"
9900                 f.add_literal_data("Jobs: ")
9901                 f.push_style(number_style)
9902                 f.add_literal_data(curval_str)
9903                 f.pop_style()
9904                 f.add_literal_data(" of ")
9905                 f.push_style(number_style)
9906                 f.add_literal_data(maxval_str)
9907                 f.pop_style()
9908                 f.add_literal_data(" complete")
9909
9910                 if self.running:
9911                         f.add_literal_data(", ")
9912                         f.push_style(number_style)
9913                         f.add_literal_data(running_str)
9914                         f.pop_style()
9915                         f.add_literal_data(" running")
9916
9917                 if self.failed:
9918                         f.add_literal_data(", ")
9919                         f.push_style(number_style)
9920                         f.add_literal_data(failed_str)
9921                         f.pop_style()
9922                         f.add_literal_data(" failed")
9923
9924                 padding = self._jobs_column_width - len(plain_output.getvalue())
9925                 if padding > 0:
9926                         f.add_literal_data(padding * " ")
9927
9928                 f.add_literal_data("Load avg: ")
9929                 f.add_literal_data(load_avg_str)
9930
9931                 # Truncate to fit width, to avoid making the terminal scroll if the
9932                 # line overflows (happens when the load average is large).
9933                 plain_output = plain_output.getvalue()
9934                 if self._isatty and len(plain_output) > self.width:
9935                         # Use plain_output here since it's easier to truncate
9936                         # properly than the color output which contains console
9937                         # color codes.
9938                         self._update(plain_output[:self.width])
9939                 else:
9940                         self._update(color_output.getvalue())
9941
9942                 xtermTitle(" ".join(plain_output.split()))
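                # Illustrative status line (all values are examples only):
                #
                #   Jobs: 3 of 11 complete, 1 running, 1 failed     Load avg: 1.95, 1.47, 1.26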
9943
9944 class Scheduler(PollScheduler):
9945
9946         _opts_ignore_blockers = \
9947                 frozenset(["--buildpkgonly",
9948                 "--fetchonly", "--fetch-all-uri",
9949                 "--nodeps", "--pretend"])
9950
9951         _opts_no_background = \
9952                 frozenset(["--pretend",
9953                 "--fetchonly", "--fetch-all-uri"])
9954
9955         _opts_no_restart = frozenset(["--buildpkgonly",
9956                 "--fetchonly", "--fetch-all-uri", "--pretend"])
9957
9958         _bad_resume_opts = set(["--ask", "--changelog",
9959                 "--resume", "--skipfirst"])
9960
9961         _fetch_log = "/var/log/emerge-fetch.log"
9962
9963         class _iface_class(SlotObject):
9964                 __slots__ = ("dblinkEbuildPhase", "dblinkDisplayMerge",
9965                         "dblinkElog", "fetch", "register", "schedule",
9966                         "scheduleSetup", "scheduleUnpack", "scheduleYield",
9967                         "unregister")
9968
9969         class _fetch_iface_class(SlotObject):
9970                 __slots__ = ("log_file", "schedule")
9971
9972         _task_queues_class = slot_dict_class(
9973                 ("merge", "jobs", "fetch", "unpack"), prefix="")
9974
9975         class _build_opts_class(SlotObject):
9976                 __slots__ = ("buildpkg", "buildpkgonly",
9977                         "fetch_all_uri", "fetchonly", "pretend")
9978
9979         class _binpkg_opts_class(SlotObject):
9980                 __slots__ = ("fetchonly", "getbinpkg", "pretend")
9981
9982         class _pkg_count_class(SlotObject):
9983                 __slots__ = ("curval", "maxval")
9984
9985         class _emerge_log_class(SlotObject):
9986                 __slots__ = ("xterm_titles",)
9987
9988                 def log(self, *pargs, **kwargs):
9989                         if not self.xterm_titles:
9990                                 # Avoid interference with the scheduler's status display.
9991                                 kwargs.pop("short_msg", None)
9992                         emergelog(self.xterm_titles, *pargs, **kwargs)
9993
9994         class _failed_pkg(SlotObject):
9995                 __slots__ = ("build_dir", "build_log", "pkg", "returncode")
9996
9997         class _ConfigPool(object):
9998                 """Interface for a task to temporarily allocate a config
9999                 instance from a pool. This allows a task to be constructed
10000                 long before the config instance actually becomes needed, like
10001                 when prefetchers are constructed for the whole merge list."""
10002                 __slots__ = ("_root", "_allocate", "_deallocate")
10003                 def __init__(self, root, allocate, deallocate):
10004                         self._root = root
10005                         self._allocate = allocate
10006                         self._deallocate = deallocate
10007                 def allocate(self):
10008                         return self._allocate(self._root)
10009                 def deallocate(self, settings):
10010                         self._deallocate(settings)
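                # Hedged usage sketch ("config_pool" is a hypothetical instance
                # of this class; error handling is up to the caller):
                #
                #   settings = config_pool.allocate()
                #   try:
                #       ...  # use the temporary config instance
                #   finally:
                #       config_pool.deallocate(settings)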
10011
10012         class _unknown_internal_error(portage.exception.PortageException):
10013                 """
10014                 Used internally to terminate scheduling. The specific reason for
10015                 the failure should have been dumped to stderr.
10016                 """
10017                 def __init__(self, value=""):
10018                         portage.exception.PortageException.__init__(self, value)
10019
10020         def __init__(self, settings, trees, mtimedb, myopts,
10021                 spinner, mergelist, favorites, digraph):
10022                 PollScheduler.__init__(self)
10023                 self.settings = settings
10024                 self.target_root = settings["ROOT"]
10025                 self.trees = trees
10026                 self.myopts = myopts
10027                 self._spinner = spinner
10028                 self._mtimedb = mtimedb
10029                 self._mergelist = mergelist
10030                 self._favorites = favorites
10031                 self._args_set = InternalPackageSet(favorites)
10032                 self._build_opts = self._build_opts_class()
10033                 for k in self._build_opts.__slots__:
10034                         setattr(self._build_opts, k, "--" + k.replace("_", "-") in myopts)
10035                 self._binpkg_opts = self._binpkg_opts_class()
10036                 for k in self._binpkg_opts.__slots__:
10037                         setattr(self._binpkg_opts, k, "--" + k.replace("_", "-") in myopts)
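                # For example, the "fetch_all_uri" slot above reflects whether the
                # "--fetch-all-uri" option appears in myopts, and likewise for the
                # other slots.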
10038
10039                 self.curval = 0
10040                 self._logger = self._emerge_log_class()
10041                 self._task_queues = self._task_queues_class()
10042                 for k in self._task_queues.allowed_keys:
10043                         setattr(self._task_queues, k,
10044                                 SequentialTaskQueue())
10045
10046                 # Holds merges that will wait to be executed when no builds are
10047                 # executing. This is useful for system packages since dependencies
10048                 # on system packages are frequently unspecified.
10049                 self._merge_wait_queue = []
10050                 # Holds merges that have been transferred from the merge_wait_queue to
10051                 # the actual merge queue. They are removed from this list upon
10052                 # completion. Other packages can start building only when this list is
10053                 # empty.
10054                 self._merge_wait_scheduled = []
10055
10056                 # Holds system packages and their deep runtime dependencies. Before
10057                 # being merged, these packages go to merge_wait_queue, to be merged
10058                 # when no other packages are building.
10059                 self._deep_system_deps = set()
10060
10061                 # Holds packages to merge which will satisfy currently unsatisfied
10062                 # deep runtime dependencies of system packages. If this is not empty
10063                 # then no parallel builds will be spawned until it is empty. This
10064                 # minimizes the possibility that a build will fail due to the system
10065                 # being in a fragile state. For example, see bug #259954.
10066                 self._unsatisfied_system_deps = set()
10067
10068                 self._status_display = JobStatusDisplay()
10069                 self._max_load = myopts.get("--load-average")
10070                 max_jobs = myopts.get("--jobs")
10071                 if max_jobs is None:
10072                         max_jobs = 1
10073                 self._set_max_jobs(max_jobs)
10074
10075                 # The root where the currently running
10076                 # portage instance is installed.
10077                 self._running_root = trees["/"]["root_config"]
10078                 self.edebug = 0
10079                 if settings.get("PORTAGE_DEBUG", "") == "1":
10080                         self.edebug = 1
10081                 self.pkgsettings = {}
10082                 self._config_pool = {}
10083                 self._blocker_db = {}
10084                 for root in trees:
10085                         self._config_pool[root] = []
10086                         self._blocker_db[root] = BlockerDB(trees[root]["root_config"])
10087
10088                 fetch_iface = self._fetch_iface_class(log_file=self._fetch_log,
10089                         schedule=self._schedule_fetch)
10090                 self._sched_iface = self._iface_class(
10091                         dblinkEbuildPhase=self._dblink_ebuild_phase,
10092                         dblinkDisplayMerge=self._dblink_display_merge,
10093                         dblinkElog=self._dblink_elog,
10094                         fetch=fetch_iface, register=self._register,
10095                         schedule=self._schedule_wait,
10096                         scheduleSetup=self._schedule_setup,
10097                         scheduleUnpack=self._schedule_unpack,
10098                         scheduleYield=self._schedule_yield,
10099                         unregister=self._unregister)
10100
10101                 self._prefetchers = weakref.WeakValueDictionary()
10102                 self._pkg_queue = []
10103                 self._completed_tasks = set()
10104
10105                 self._failed_pkgs = []
10106                 self._failed_pkgs_all = []
10107                 self._failed_pkgs_die_msgs = []
10108                 self._post_mod_echo_msgs = []
10109                 self._parallel_fetch = False
10110                 merge_count = len([x for x in mergelist \
10111                         if isinstance(x, Package) and x.operation == "merge"])
10112                 self._pkg_count = self._pkg_count_class(
10113                         curval=0, maxval=merge_count)
10114                 self._status_display.maxval = self._pkg_count.maxval
10115
10116                 # The load average takes some time to respond when new
10117                 # jobs are added, so we need to limit the rate of adding
10118                 # new jobs.
10119                 self._job_delay_max = 10
10120                 self._job_delay_factor = 1.0
10121                 self._job_delay_exp = 1.5
10122                 self._previous_job_start_time = None
10123
10124                 self._set_digraph(digraph)
10125
10126                 # This is used to memoize the _choose_pkg() result when
10127                 # no packages can be chosen until one of the existing
10128                 # jobs completes.
10129                 self._choose_pkg_return_early = False
10130
10131                 features = self.settings.features
10132                 if "parallel-fetch" in features and \
10133                         not ("--pretend" in self.myopts or \
10134                         "--fetch-all-uri" in self.myopts or \
10135                         "--fetchonly" in self.myopts):
10136                         if "distlocks" not in features:
10137                                 portage.writemsg(red("!!!")+"\n", noiselevel=-1)
10138                                 portage.writemsg(red("!!!")+" parallel-fetching " + \
10139                                         "requires the distlocks feature to be enabled"+"\n",
10140                                         noiselevel=-1)
10141                                 portage.writemsg(red("!!!")+" you have it disabled, " + \
10142                                         "thus parallel-fetching is being disabled"+"\n",
10143                                         noiselevel=-1)
10144                                 portage.writemsg(red("!!!")+"\n", noiselevel=-1)
10145                         elif len(mergelist) > 1:
10146                                 self._parallel_fetch = True
10147
10148                 if self._parallel_fetch:
10149                         # clear out existing fetch log if it exists
10150                         try:
10151                                 open(self._fetch_log, 'w')
10152                         except EnvironmentError:
10153                                 pass
10154
10155                 self._running_portage = None
10156                 portage_match = self._running_root.trees["vartree"].dbapi.match(
10157                         portage.const.PORTAGE_PACKAGE_ATOM)
10158                 if portage_match:
10159                         cpv = portage_match.pop()
10160                         self._running_portage = self._pkg(cpv, "installed",
10161                                 self._running_root, installed=True)
10162
10163         def _poll(self, timeout=None):
10164                 self._schedule()
10165                 PollScheduler._poll(self, timeout=timeout)
10166
10167         def _set_max_jobs(self, max_jobs):
10168                 self._max_jobs = max_jobs
10169                 self._task_queues.jobs.max_jobs = max_jobs
10170
10171         def _background_mode(self):
10172                 """
10173                 Check if background mode is enabled and adjust states as necessary.
10174
10175                 @rtype: bool
10176                 @returns: True if background mode is enabled, False otherwise.
10177                 """
10178                 background = (self._max_jobs is True or \
10179                         self._max_jobs > 1 or "--quiet" in self.myopts) and \
10180                         not bool(self._opts_no_background.intersection(self.myopts))
10181
10182                 if background:
10183                         interactive_tasks = self._get_interactive_tasks()
10184                         if interactive_tasks:
10185                                 background = False
10186                                 writemsg_level(">>> Sending package output to stdio due " + \
10187                                         "to interactive package(s):\n",
10188                                         level=logging.INFO, noiselevel=-1)
10189                                 msg = [""]
10190                                 for pkg in interactive_tasks:
10191                                         pkg_str = "  " + colorize("INFORM", str(pkg.cpv))
10192                                         if pkg.root != "/":
10193                                                 pkg_str += " for " + pkg.root
10194                                         msg.append(pkg_str)
10195                                 msg.append("")
10196                                 writemsg_level("".join("%s\n" % (l,) for l in msg),
10197                                         level=logging.INFO, noiselevel=-1)
10198                                 if self._max_jobs is True or self._max_jobs > 1:
10199                                         self._set_max_jobs(1)
10200                                         writemsg_level(">>> Setting --jobs=1 due " + \
10201                                                 "to the above interactive package(s)\n",
10202                                                 level=logging.INFO, noiselevel=-1)
10203
10204                 self._status_display.quiet = \
10205                         not background or \
10206                         ("--quiet" in self.myopts and \
10207                         "--verbose" not in self.myopts)
10208
10209                 self._logger.xterm_titles = \
10210                         "notitles" not in self.settings.features and \
10211                         self._status_display.quiet
10212
10213                 return background
10214
10215         def _get_interactive_tasks(self):
10216                 from portage import flatten
10217                 from portage.dep import use_reduce, paren_reduce
10218                 interactive_tasks = []
10219                 for task in self._mergelist:
10220                         if not (isinstance(task, Package) and \
10221                                 task.operation == "merge"):
10222                                 continue
10223                         try:
10224                                 properties = flatten(use_reduce(paren_reduce(
10225                                         task.metadata["PROPERTIES"]), uselist=task.use.enabled))
10226                         except portage.exception.InvalidDependString, e:
10227                                 show_invalid_depstring_notice(task,
10228                                         task.metadata["PROPERTIES"], str(e))
10229                                 raise self._unknown_internal_error()
10230                         if "interactive" in properties:
10231                                 interactive_tasks.append(task)
10232                 return interactive_tasks
10233
10234         def _set_digraph(self, digraph):
10235                 if "--nodeps" in self.myopts or \
10236                         (self._max_jobs is not True and self._max_jobs < 2):
10237                         # save some memory
10238                         self._digraph = None
10239                         return
10240
10241                 self._digraph = digraph
10242                 self._find_system_deps()
10243                 self._prune_digraph()
10244                 self._prevent_builddir_collisions()
10245
10246         def _find_system_deps(self):
10247                 """
10248                 Find system packages and their deep runtime dependencies. Before being
10249                 merged, these packages go to merge_wait_queue, to be merged when no
10250                 other packages are building.
10251                 """
10252                 deep_system_deps = self._deep_system_deps
10253                 deep_system_deps.clear()
10254                 deep_system_deps.update(
10255                         _find_deep_system_runtime_deps(self._digraph))
10256                 deep_system_deps.difference_update([pkg for pkg in \
10257                         deep_system_deps if pkg.operation != "merge"])
10258
10259         def _prune_digraph(self):
10260                 """
10261                 Prune any root nodes that are irrelevant.
10262                 """
10263
10264                 graph = self._digraph
10265                 completed_tasks = self._completed_tasks
10266                 removed_nodes = set()
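                      # Removing a root node can expose new root nodes that are also
                      # irrelevant, so keep sweeping until a pass removes nothing.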
10267                 while True:
10268                         for node in graph.root_nodes():
10269                                 if not isinstance(node, Package) or \
10270                                         (node.installed and node.operation == "nomerge") or \
10271                                         node.onlydeps or \
10272                                         node in completed_tasks:
10273                                         removed_nodes.add(node)
10274                         if removed_nodes:
10275                                 graph.difference_update(removed_nodes)
10276                         if not removed_nodes:
10277                                 break
10278                         removed_nodes.clear()
10279
10280         def _prevent_builddir_collisions(self):
10281                 """
10282                 When building stages, sometimes the same exact cpv needs to be merged
10283                 to both $ROOTs. Add edges to the digraph in order to avoid collisions
10284                 in the builddir. Currently, normal file locks would be inappropriate
10285                 for this purpose since emerge holds all of its build dir locks from
10286                 the main process.
10287                 """
10288                 cpv_map = {}
10289                 for pkg in self._mergelist:
10290                         if not isinstance(pkg, Package):
10291                                 # a satisfied blocker
10292                                 continue
10293                         if pkg.installed:
10294                                 continue
10295                         if pkg.cpv not in cpv_map:
10296                                 cpv_map[pkg.cpv] = [pkg]
10297                                 continue
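                              # Another package with the same cpv is already scheduled
                              # (e.g. for a different $ROOT); add buildtime edges so this
                              # merge waits for every earlier one, serializing use of the
                              # shared builddir.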
10298                         for earlier_pkg in cpv_map[pkg.cpv]:
10299                                 self._digraph.add(earlier_pkg, pkg,
10300                                         priority=DepPriority(buildtime=True))
10301                         cpv_map[pkg.cpv].append(pkg)
10302
10303         class _pkg_failure(portage.exception.PortageException):
10304                 """
10305                 An instance of this class is raised by unmerge() when
10306                 an uninstallation fails.
10307                 """
10308                 status = 1
10309                 def __init__(self, *pargs):
10310                         portage.exception.PortageException.__init__(self, pargs)
10311                         if pargs:
10312                                 self.status = pargs[0]
10313
10314         def _schedule_fetch(self, fetcher):
10315                 """
10316                 Schedule a fetcher on the fetch queue, in order to
10317                 serialize access to the fetch log.
10318                 """
10319                 self._task_queues.fetch.addFront(fetcher)
10320
10321         def _schedule_setup(self, setup_phase):
10322                 """
10323                 Schedule a setup phase on the merge queue, in order to
10324                 serialize unsandboxed access to the live filesystem.
10325                 """
10326                 self._task_queues.merge.addFront(setup_phase)
10327                 self._schedule()
10328
10329         def _schedule_unpack(self, unpack_phase):
10330                 """
10331                 Schedule an unpack phase on the unpack queue, in order
10332                 to serialize $DISTDIR access for live ebuilds.
10333                 """
10334                 self._task_queues.unpack.add(unpack_phase)
10335
10336         def _find_blockers(self, new_pkg):
10337                 """
10338                 Returns a callable which should be called only when
10339                 the vdb lock has been acquired.
10340                 """
10341                 def get_blockers():
10342                         return self._find_blockers_with_lock(new_pkg, acquire_lock=0)
10343                 return get_blockers
10344
10345         def _find_blockers_with_lock(self, new_pkg, acquire_lock=0):
10346                 if self._opts_ignore_blockers.intersection(self.myopts):
10347                         return None
10348
10349                 # Call gc.collect() here to avoid heap overflow that
10350                 # triggers 'Cannot allocate memory' errors (reported
10351                 # with python-2.5).
10352                 import gc
10353                 gc.collect()
10354
10355                 blocker_db = self._blocker_db[new_pkg.root]
10356
10357                 blocker_dblinks = []
10358                 for blocking_pkg in blocker_db.findInstalledBlockers(
10359                         new_pkg, acquire_lock=acquire_lock):
10360                         if new_pkg.slot_atom == blocking_pkg.slot_atom:
10361                                 continue
10362                         if new_pkg.cpv == blocking_pkg.cpv:
10363                                 continue
10364                         blocker_dblinks.append(portage.dblink(
10365                                 blocking_pkg.category, blocking_pkg.pf, blocking_pkg.root,
10366                                 self.pkgsettings[blocking_pkg.root], treetype="vartree",
10367                                 vartree=self.trees[blocking_pkg.root]["vartree"]))
10368
10369                 gc.collect()
10370
10371                 return blocker_dblinks
10372
10373         def _dblink_pkg(self, pkg_dblink):
10374                 cpv = pkg_dblink.mycpv
10375                 type_name = RootConfig.tree_pkg_map[pkg_dblink.treetype]
10376                 root_config = self.trees[pkg_dblink.myroot]["root_config"]
10377                 installed = type_name == "installed"
10378                 return self._pkg(cpv, type_name, root_config, installed=installed)
10379
10380         def _append_to_log_path(self, log_path, msg):
10381                 f = open(log_path, 'a')
10382                 try:
10383                         f.write(msg)
10384                 finally:
10385                         f.close()
10386
10387         def _dblink_elog(self, pkg_dblink, phase, func, msgs):
10388
10389                 log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
10390                 log_file = None
10391                 out = sys.stdout
10392                 background = self._background
10393
10394                 if background and log_path is not None:
10395                         log_file = open(log_path, 'a')
10396                         out = log_file
10397
10398                 try:
10399                         for msg in msgs:
10400                                 func(msg, phase=phase, key=pkg_dblink.mycpv, out=out)
10401                 finally:
10402                         if log_file is not None:
10403                                 log_file.close()
10404
10405         def _dblink_display_merge(self, pkg_dblink, msg, level=0, noiselevel=0):
10406                 log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
10407                 background = self._background
10408
10409                 if log_path is None:
10410                         if not (background and level < logging.WARN):
10411                                 portage.util.writemsg_level(msg,
10412                                         level=level, noiselevel=noiselevel)
10413                 else:
10414                         if not background:
10415                                 portage.util.writemsg_level(msg,
10416                                         level=level, noiselevel=noiselevel)
10417                         self._append_to_log_path(log_path, msg)
10418
10419         def _dblink_ebuild_phase(self,
10420                 pkg_dblink, pkg_dbapi, ebuild_path, phase):
10421                 """
10422                 Using this callback for merge phases allows the scheduler
10423                 to run while these phases execute asynchronously, and allows
10424                 the scheduler to control output handling.
10425                 """
10426
10427                 scheduler = self._sched_iface
10428                 settings = pkg_dblink.settings
10429                 pkg = self._dblink_pkg(pkg_dblink)
10430                 background = self._background
10431                 log_path = settings.get("PORTAGE_LOG_FILE")
10432
10433                 ebuild_phase = EbuildPhase(background=background,
10434                         pkg=pkg, phase=phase, scheduler=scheduler,
10435                         settings=settings, tree=pkg_dblink.treetype)
10436                 ebuild_phase.start()
10437                 ebuild_phase.wait()
10438
10439                 return ebuild_phase.returncode
10440
10441         def _generate_digests(self):
10442                 """
10443                 Generate digests if necessary for --digest or FEATURES=digest.
10444                 In order to avoid interference, this must be done before parallel
10445                 tasks are started.
10446                 """
10447
10448                 if '--fetchonly' in self.myopts:
10449                         return os.EX_OK
10450
10451                 digest = '--digest' in self.myopts
10452                 if not digest:
10453                         for pkgsettings in self.pkgsettings.itervalues():
10454                                 if 'digest' in pkgsettings.features:
10455                                         digest = True
10456                                         break
10457
10458                 if not digest:
10459                         return os.EX_OK
10460
10461                 for x in self._mergelist:
10462                         if not isinstance(x, Package) or \
10463                                 x.type_name != 'ebuild' or \
10464                                 x.operation != 'merge':
10465                                 continue
10466                         pkgsettings = self.pkgsettings[x.root]
10467                         if '--digest' not in self.myopts and \
10468                                 'digest' not in pkgsettings.features:
10469                                 continue
10470                         portdb = x.root_config.trees['porttree'].dbapi
10471                         ebuild_path = portdb.findname(x.cpv)
10472                         if not ebuild_path:
10473                                 writemsg_level(
10474                                         "!!! Could not locate ebuild for '%s'.\n" \
10475                                         % x.cpv, level=logging.ERROR, noiselevel=-1)
10476                                 return 1
10477                         pkgsettings['O'] = os.path.dirname(ebuild_path)
10478                         if not portage.digestgen([], pkgsettings, myportdb=portdb):
10479                                 writemsg_level(
10480                                         "!!! Unable to generate manifest for '%s'.\n" \
10481                                         % x.cpv, level=logging.ERROR, noiselevel=-1)
10482                                 return 1
10483
10484                 return os.EX_OK
10485
10486         def _check_manifests(self):
10487                 # Verify all the manifests now so that the user is notified of failure
10488                 # as soon as possible.
10489                 if "strict" not in self.settings.features or \
10490                         "--fetchonly" in self.myopts or \
10491                         "--fetch-all-uri" in self.myopts:
10492                         return os.EX_OK
10493
10494                 shown_verifying_msg = False
10495                 quiet_settings = {}
10496                 for myroot, pkgsettings in self.pkgsettings.iteritems():
10497                         quiet_config = portage.config(clone=pkgsettings)
10498                         quiet_config["PORTAGE_QUIET"] = "1"
10499                         quiet_config.backup_changes("PORTAGE_QUIET")
10500                         quiet_settings[myroot] = quiet_config
10501                         del quiet_config
10502
10503                 for x in self._mergelist:
10504                         if not isinstance(x, Package) or \
10505                                 x.type_name != "ebuild":
10506                                 continue
10507
10508                         if not shown_verifying_msg:
10509                                 shown_verifying_msg = True
10510                                 self._status_msg("Verifying ebuild manifests")
10511
10512                         root_config = x.root_config
10513                         portdb = root_config.trees["porttree"].dbapi
10514                         quiet_config = quiet_settings[root_config.root]
10515                         quiet_config["O"] = os.path.dirname(portdb.findname(x.cpv))
10516                         if not portage.digestcheck([], quiet_config, strict=True):
10517                                 return 1
10518
10519                 return os.EX_OK
10520
10521         def _add_prefetchers(self):
10522
10523                 if not self._parallel_fetch:
10524                         return
10525
10526                 self._status_msg("Starting parallel fetch")
10527
10528                 prefetchers = self._prefetchers
10529                 getbinpkg = "--getbinpkg" in self.myopts
10530
10531                 # In order to avoid "waiting for lock" messages
10532                 # at the beginning, which annoy users, never
10533                 # spawn a prefetcher for the first package.
10534                 for pkg in self._mergelist[1:]:
10535                         prefetcher = self._create_prefetcher(pkg)
10536                         if prefetcher is not None:
10537                                 self._task_queues.fetch.add(prefetcher)
10538                                 prefetchers[pkg] = prefetcher
10540
10541         def _create_prefetcher(self, pkg):
10542                 """
10543                 @return: a prefetcher, or None if not applicable
10544                 """
10545                 prefetcher = None
10546
10547                 if not isinstance(pkg, Package):
10548                         pass
10549
10550                 elif pkg.type_name == "ebuild":
10551
10552                         prefetcher = EbuildFetcher(background=True,
10553                                 config_pool=self._ConfigPool(pkg.root,
10554                                 self._allocate_config, self._deallocate_config),
10555                                 fetchonly=1, logfile=self._fetch_log,
10556                                 pkg=pkg, prefetch=True, scheduler=self._sched_iface)
10557
10558                 elif pkg.type_name == "binary" and \
10559                         "--getbinpkg" in self.myopts and \
10560                         pkg.root_config.trees["bintree"].isremote(pkg.cpv):
10561
10562                         prefetcher = BinpkgPrefetcher(background=True,
10563                                 pkg=pkg, scheduler=self._sched_iface)
10564
10565                 return prefetcher
10566
10567         def _is_restart_scheduled(self):
10568                 """
10569                 Check if the merge list contains a replacement
10570                 for the currently running instance, which will result
10571                 in a restart after the merge.
10572                 @rtype: bool
10573                 @returns: True if a restart is scheduled, False otherwise.
10574                 """
10575                 if self._opts_no_restart.intersection(self.myopts):
10576                         return False
10577
10578                 mergelist = self._mergelist
10579
10580                 for i, pkg in enumerate(mergelist):
10581                         if self._is_restart_necessary(pkg) and \
10582                                 i != len(mergelist) - 1:
10583                                 return True
10584
10585                 return False
10586
10587         def _is_restart_necessary(self, pkg):
10588                 """
10589                 @return: True if merging the given package
10590                         requires restart, False otherwise.
10591                 """
10592
10593                 # Figure out if we need a restart.
10594                 if pkg.root == self._running_root.root and \
10595                         portage.match_from_list(
10596                         portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
10597                         if self._running_portage:
10598                                 return pkg.cpv != self._running_portage.cpv
10599                         return True
10600                 return False
10601
10602         def _restart_if_necessary(self, pkg):
10603                 """
10604                 Use execv() to restart emerge. This happens
10605                 if portage upgrades itself and there are
10606                 remaining packages in the list.
10607                 """
10608
10609                 if self._opts_no_restart.intersection(self.myopts):
10610                         return
10611
10612                 if not self._is_restart_necessary(pkg):
10613                         return
10614
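                      # If this is the last package in the list there is nothing left
                      # to merge after a restart, so just finish normally.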
10615                 if pkg == self._mergelist[-1]:
10616                         return
10617
10618                 self._main_loop_cleanup()
10619
10620                 logger = self._logger
10621                 pkg_count = self._pkg_count
10622                 mtimedb = self._mtimedb
10623                 bad_resume_opts = self._bad_resume_opts
10624
10625                 logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
10626                         (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
10627
10628                 logger.log(" *** RESTARTING " + \
10629                         "emerge via exec() after change of " + \
10630                         "portage version.")
10631
10632                 mtimedb["resume"]["mergelist"].remove(list(pkg))
10633                 mtimedb.commit()
10634                 portage.run_exitfuncs()
10635                 mynewargv = [sys.argv[0], "--resume"]
10636                 resume_opts = self.myopts.copy()
10637                 # For automatic resume, we need to prevent
10638                 # any of bad_resume_opts from leaking in
10639                 # via EMERGE_DEFAULT_OPTS.
10640                 resume_opts["--ignore-default-opts"] = True
10641                 for myopt, myarg in resume_opts.iteritems():
10642                         if myopt not in bad_resume_opts:
10643                                 if myarg is True:
10644                                         mynewargv.append(myopt)
10645                                 else:
10646                                         mynewargv.append(myopt +"="+ str(myarg))
10647                 # priority only needs to be adjusted on the first run
10648                 os.environ["PORTAGE_NICENESS"] = "0"
10649                 os.execv(mynewargv[0], mynewargv)
10650
10651         def merge(self):
10652
10653                 if "--resume" in self.myopts:
10654                         # We're resuming.
10655                         portage.writemsg_stdout(
10656                                 colorize("GOOD", "*** Resuming merge...\n"), noiselevel=-1)
10657                         self._logger.log(" *** Resuming merge...")
10658
10659                 self._save_resume_list()
10660
10661                 try:
10662                         self._background = self._background_mode()
10663                 except self._unknown_internal_error:
10664                         return 1
10665
10666                 for root in self.trees:
10667                         root_config = self.trees[root]["root_config"]
10668
10669                         # Even for --pretend --fetch mode, PORTAGE_TMPDIR is required
10670                         # since it might spawn pkg_nofetch which requires PORTAGE_BUILDDIR
10671                         # for ensuring sane $PWD (bug #239560) and storing elog messages.
10672                         tmpdir = root_config.settings.get("PORTAGE_TMPDIR", "")
10673                         if not tmpdir or not os.path.isdir(tmpdir):
10674                                 msg = "The directory specified in your " + \
10675                                         "PORTAGE_TMPDIR variable, '%s', " % tmpdir + \
10676                                         "does not exist. Please create this " + \
10677                                         "directory or correct your PORTAGE_TMPDIR setting."
10678                                 msg = textwrap.wrap(msg, 70)
10679                                 out = portage.output.EOutput()
10680                                 for l in msg:
10681                                         out.eerror(l)
10682                                 return 1
10683
10684                         if self._background:
10685                                 root_config.settings.unlock()
10686                                 root_config.settings["PORTAGE_BACKGROUND"] = "1"
10687                                 root_config.settings.backup_changes("PORTAGE_BACKGROUND")
10688                                 root_config.settings.lock()
10689
10690                         self.pkgsettings[root] = portage.config(
10691                                 clone=root_config.settings)
10692
10693                 rval = self._generate_digests()
10694                 if rval != os.EX_OK:
10695                         return rval
10696
10697                 rval = self._check_manifests()
10698                 if rval != os.EX_OK:
10699                         return rval
10700
10701                 keep_going = "--keep-going" in self.myopts
10702                 fetchonly = self._build_opts.fetchonly
10703                 mtimedb = self._mtimedb
10704                 failed_pkgs = self._failed_pkgs
10705
10706                 while True:
10707                         rval = self._merge()
10708                         if rval == os.EX_OK or fetchonly or not keep_going:
10709                                 break
10710                         if "resume" not in mtimedb:
10711                                 break
10712                         mergelist = self._mtimedb["resume"].get("mergelist")
10713                         if not mergelist:
10714                                 break
10715
10716                         if not failed_pkgs:
10717                                 break
10718
10719                         for failed_pkg in failed_pkgs:
10720                                 mergelist.remove(list(failed_pkg.pkg))
10721
10722                         self._failed_pkgs_all.extend(failed_pkgs)
10723                         del failed_pkgs[:]
10724
10725                         if not mergelist:
10726                                 break
10727
10728                         if not self._calc_resume_list():
10729                                 break
10730
10731                         clear_caches(self.trees)
10732                         if not self._mergelist:
10733                                 break
10734
10735                         self._save_resume_list()
10736                         self._pkg_count.curval = 0
10737                         self._pkg_count.maxval = len([x for x in self._mergelist \
10738                                 if isinstance(x, Package) and x.operation == "merge"])
10739                         self._status_display.maxval = self._pkg_count.maxval
10740
10741                 self._logger.log(" *** Finished. Cleaning up...")
10742
10743                 if failed_pkgs:
10744                         self._failed_pkgs_all.extend(failed_pkgs)
10745                         del failed_pkgs[:]
10746
10747                 background = self._background
10748                 failure_log_shown = False
10749                 if background and len(self._failed_pkgs_all) == 1:
10750                         # If only one package failed then just show its
10751                         # whole log for easy viewing.
10752                         failed_pkg = self._failed_pkgs_all[-1]
10753                         build_dir = failed_pkg.build_dir
10754                         log_file = None
10755
10756                         log_paths = [failed_pkg.build_log]
10757
10758                         log_path = self._locate_failure_log(failed_pkg)
10759                         if log_path is not None:
10760                                 try:
10761                                         log_file = open(log_path)
10762                                 except IOError:
10763                                         pass
10764
10765                         if log_file is not None:
10766                                 try:
10767                                         for line in log_file:
10768                                                 writemsg_level(line, noiselevel=-1)
10769                                 finally:
10770                                         log_file.close()
10771                                 failure_log_shown = True
10772
10773                 # Dump mod_echo output now since it tends to flood the terminal.
10774                 # This allows us to avoid having more important output, generated
10775                 # later, from being swept away by the mod_echo output.
10776                 mod_echo_output = _flush_elog_mod_echo()
10777
10778                 if background and not failure_log_shown and \
10779                         self._failed_pkgs_all and \
10780                         self._failed_pkgs_die_msgs and \
10781                         not mod_echo_output:
10782
10783                         printer = portage.output.EOutput()
10784                         for mysettings, key, logentries in self._failed_pkgs_die_msgs:
10785                                 root_msg = ""
10786                                 if mysettings["ROOT"] != "/":
10787                                         root_msg = " merged to %s" % mysettings["ROOT"]
10788                                 print
10789                                 printer.einfo("Error messages for package %s%s:" % \
10790                                         (colorize("INFORM", key), root_msg))
10791                                 print
10792                                 for phase in portage.const.EBUILD_PHASES:
10793                                         if phase not in logentries:
10794                                                 continue
10795                                         for msgtype, msgcontent in logentries[phase]:
10796                                                 if isinstance(msgcontent, basestring):
10797                                                         msgcontent = [msgcontent]
10798                                                 for line in msgcontent:
10799                                                         printer.eerror(line.strip("\n"))
10800
10801                 if self._post_mod_echo_msgs:
10802                         for msg in self._post_mod_echo_msgs:
10803                                 msg()
10804
10805                 if len(self._failed_pkgs_all) > 1 or \
10806                         (self._failed_pkgs_all and "--keep-going" in self.myopts):
10807                         if len(self._failed_pkgs_all) > 1:
10808                                 msg = "The following %d packages have " % \
10809                                         len(self._failed_pkgs_all) + \
10810                                         "failed to build or install:"
10811                         else:
10812                                 msg = "The following package has " + \
10813                                         "failed to build or install:"
10814                         prefix = bad(" * ")
10815                         writemsg(prefix + "\n", noiselevel=-1)
10816                         from textwrap import wrap
10817                         for line in wrap(msg, 72):
10818                                 writemsg("%s%s\n" % (prefix, line), noiselevel=-1)
10819                         writemsg(prefix + "\n", noiselevel=-1)
10820                         for failed_pkg in self._failed_pkgs_all:
10821                                 writemsg("%s\t%s\n" % (prefix,
10822                                         colorize("INFORM", str(failed_pkg.pkg))),
10823                                         noiselevel=-1)
10824                         writemsg(prefix + "\n", noiselevel=-1)
10825
10826                 return rval
10827
10828         def _elog_listener(self, mysettings, key, logentries, fulltext):
10829                 errors = portage.elog.filter_loglevels(logentries, ["ERROR"])
10830                 if errors:
10831                         self._failed_pkgs_die_msgs.append(
10832                                 (mysettings, key, errors))
10833
10834         def _locate_failure_log(self, failed_pkg):
10835
10836                 build_dir = failed_pkg.build_dir
10837                 log_file = None
10838
10839                 log_paths = [failed_pkg.build_log]
10840
10841                 for log_path in log_paths:
10842                         if not log_path:
10843                                 continue
10844
10845                         try:
10846                                 log_size = os.stat(log_path).st_size
10847                         except OSError:
10848                                 continue
10849
10850                         if log_size == 0:
10851                                 continue
10852
10853                         return log_path
10854
10855                 return None
10856
10857         def _add_packages(self):
10858                 pkg_queue = self._pkg_queue
10859                 for pkg in self._mergelist:
10860                         if isinstance(pkg, Package):
10861                                 pkg_queue.append(pkg)
10862                         elif isinstance(pkg, Blocker):
10863                                 pass
10864
10865         def _system_merge_started(self, merge):
10866                 """
10867                 Add any unsatisfied runtime deps to self._unsatisfied_system_deps.
10868                 """
10869                 graph = self._digraph
10870                 if graph is None:
10871                         return
10872                 pkg = merge.merge.pkg
10873
10874                 # Skip this if $ROOT != / since it shouldn't matter if there
10875                 # are unsatisfied system runtime deps in this case.
10876                 if pkg.root != '/':
10877                         return
10878
10879                 completed_tasks = self._completed_tasks
10880                 unsatisfied = self._unsatisfied_system_deps
10881
10882                 def ignore_non_runtime_or_satisfied(priority):
10883                         """
10884                         Ignore non-runtime and satisfied runtime priorities.
10885                         """
10886                         if isinstance(priority, DepPriority) and \
10887                                 not priority.satisfied and \
10888                                 (priority.runtime or priority.runtime_post):
10889                                 return False
10890                         return True
10891
10892                 # When checking for unsatisfied runtime deps, only check
10893                 # direct deps since indirect deps are checked when the
10894                 # corresponding parent is merged.
10895                 for child in graph.child_nodes(pkg,
10896                         ignore_priority=ignore_non_runtime_or_satisfied):
10897                         if not isinstance(child, Package) or \
10898                                 child.operation == 'uninstall':
10899                                 continue
10900                         if child is pkg:
10901                                 continue
10902                         if child.operation == 'merge' and \
10903                                 child not in completed_tasks:
10904                                 unsatisfied.add(child)
10905
10906         def _merge_wait_exit_handler(self, task):
10907                 self._merge_wait_scheduled.remove(task)
10908                 self._merge_exit(task)
10909
10910         def _merge_exit(self, merge):
10911                 self._do_merge_exit(merge)
10912                 self._deallocate_config(merge.merge.settings)
10913                 if merge.returncode == os.EX_OK and \
10914                         not merge.merge.pkg.installed:
10915                         self._status_display.curval += 1
10916                 self._status_display.merges = len(self._task_queues.merge)
10917                 self._schedule()
10918
10919         def _do_merge_exit(self, merge):
10920                 pkg = merge.merge.pkg
10921                 if merge.returncode != os.EX_OK:
10922                         settings = merge.merge.settings
10923                         build_dir = settings.get("PORTAGE_BUILDDIR")
10924                         build_log = settings.get("PORTAGE_LOG_FILE")
10925
10926                         self._failed_pkgs.append(self._failed_pkg(
10927                                 build_dir=build_dir, build_log=build_log,
10928                                 pkg=pkg,
10929                                 returncode=merge.returncode))
10930                         self._failed_pkg_msg(self._failed_pkgs[-1], "install", "to")
10931
10932                         self._status_display.failed = len(self._failed_pkgs)
10933                         return
10934
10935                 self._task_complete(pkg)
10936                 pkg_to_replace = merge.merge.pkg_to_replace
10937                 if pkg_to_replace is not None:
10938                         # When a package is replaced, mark its uninstall
10939                         # task complete (if any).
10940                         uninst_hash_key = \
10941                                 ("installed", pkg.root, pkg_to_replace.cpv, "uninstall")
10942                         self._task_complete(uninst_hash_key)
10943
10944                 if pkg.installed:
10945                         return
10946
10947                 self._restart_if_necessary(pkg)
10948
10949                 # Call mtimedb.commit() after each merge so that
10950                 # --resume still works after being interrupted
10951                 # by reboot, sigkill or similar.
10952                 mtimedb = self._mtimedb
10953                 mtimedb["resume"]["mergelist"].remove(list(pkg))
10954                 if not mtimedb["resume"]["mergelist"]:
10955                         del mtimedb["resume"]
10956                 mtimedb.commit()
10957
10958         def _build_exit(self, build):
10959                 if build.returncode == os.EX_OK:
10960                         self.curval += 1
10961                         merge = PackageMerge(merge=build)
10962                         if not build.build_opts.buildpkgonly and \
10963                                 build.pkg in self._deep_system_deps:
10964                                 # Since dependencies on system packages are frequently
10965                                 # unspecified, merge them only when no builds are executing.
10966                                 self._merge_wait_queue.append(merge)
10967                                 merge.addStartListener(self._system_merge_started)
10968                         else:
10969                                 merge.addExitListener(self._merge_exit)
10970                                 self._task_queues.merge.add(merge)
10971                                 self._status_display.merges = len(self._task_queues.merge)
10972                 else:
10973                         settings = build.settings
10974                         build_dir = settings.get("PORTAGE_BUILDDIR")
10975                         build_log = settings.get("PORTAGE_LOG_FILE")
10976
10977                         self._failed_pkgs.append(self._failed_pkg(
10978                                 build_dir=build_dir, build_log=build_log,
10979                                 pkg=build.pkg,
10980                                 returncode=build.returncode))
10981                         self._failed_pkg_msg(self._failed_pkgs[-1], "emerge", "for")
10982
10983                         self._status_display.failed = len(self._failed_pkgs)
10984                         self._deallocate_config(build.settings)
10985                 self._jobs -= 1
10986                 self._status_display.running = self._jobs
10987                 self._schedule()
10988
10989         def _extract_exit(self, build):
10990                 self._build_exit(build)
10991
10992         def _task_complete(self, pkg):
10993                 self._completed_tasks.add(pkg)
10994                 self._unsatisfied_system_deps.discard(pkg)
10995                 self._choose_pkg_return_early = False
10996
10997         def _merge(self):
10998
10999                 self._add_prefetchers()
11000                 self._add_packages()
11001                 pkg_queue = self._pkg_queue
11002                 failed_pkgs = self._failed_pkgs
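                      # Install the elog listener (to collect ERROR messages for the
                      # final summary) and silence lock messages while in background
                      # mode; both hooks are reset in the finally block below.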
11003                 portage.locks._quiet = self._background
11004                 portage.elog._emerge_elog_listener = self._elog_listener
11005                 rval = os.EX_OK
11006
11007                 try:
11008                         self._main_loop()
11009                 finally:
11010                         self._main_loop_cleanup()
11011                         portage.locks._quiet = False
11012                         portage.elog._emerge_elog_listener = None
11013                         if failed_pkgs:
11014                                 rval = failed_pkgs[-1].returncode
11015
11016                 return rval
11017
11018         def _main_loop_cleanup(self):
11019                 del self._pkg_queue[:]
11020                 self._completed_tasks.clear()
11021                 self._deep_system_deps.clear()
11022                 self._unsatisfied_system_deps.clear()
11023                 self._choose_pkg_return_early = False
11024                 self._status_display.reset()
11025                 self._digraph = None
11026                 self._task_queues.fetch.clear()
11027
11028         def _choose_pkg(self):
11029                 """
11030                 Choose a task that has all of its dependencies satisfied.
11031                 """
11032
11033                 if self._choose_pkg_return_early:
11034                         return None
11035
11036                 if self._digraph is None:
11037                         if (self._jobs or self._task_queues.merge) and \
11038                                 not ("--nodeps" in self.myopts and \
11039                                 (self._max_jobs is True or self._max_jobs > 1)):
11040                                 self._choose_pkg_return_early = True
11041                                 return None
11042                         return self._pkg_queue.pop(0)
11043
11044                 if not (self._jobs or self._task_queues.merge):
11045                         return self._pkg_queue.pop(0)
11046
11047                 self._prune_digraph()
11048
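                      # Scan the queue in order and pick the first package that does
                      # not depend on any currently scheduled merge. Dependencies on
                      # packages queued after the candidate ("later") are ignored,
                      # since those will be merged later than the candidate anyway.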
11049                 chosen_pkg = None
11050                 later = set(self._pkg_queue)
11051                 for pkg in self._pkg_queue:
11052                         later.remove(pkg)
11053                         if not self._dependent_on_scheduled_merges(pkg, later):
11054                                 chosen_pkg = pkg
11055                                 break
11056
11057                 if chosen_pkg is not None:
11058                         self._pkg_queue.remove(chosen_pkg)
11059
11060                 if chosen_pkg is None:
11061                         # There's no point in searching for a package to
11062                         # choose until at least one of the existing jobs
11063                         # completes.
11064                         self._choose_pkg_return_early = True
11065
11066                 return chosen_pkg
11067
11068         def _dependent_on_scheduled_merges(self, pkg, later):
11069                 """
11070                 Traverse the subgraph of the given package's deep dependencies
11071                 to see if it contains any scheduled merges.
11072                 @param pkg: a package to check dependencies for
11073                 @type pkg: Package
11074                 @param later: packages for which dependence should be ignored
11075                         since they will be merged later than pkg anyway and therefore
11076                         delaying the merge of pkg will not result in a more optimal
11077                         merge order
11078                 @type later: set
11079                 @rtype: bool
11080                 @returns: True if the package is dependent, False otherwise.
11081                 """
11082
11083                 graph = self._digraph
11084                 completed_tasks = self._completed_tasks
11085
11086                 dependent = False
11087                 traversed_nodes = set([pkg])
11088                 direct_deps = graph.child_nodes(pkg)
11089                 node_stack = direct_deps
11090                 direct_deps = frozenset(direct_deps)
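                      # Depth-first traversal of the package's deep dependencies: pkg
                      # counts as dependent as soon as a traversed node is still waiting
                      # to be merged, i.e. it is not an installed "nomerge" node, not an
                      # uninstall outside the direct deps, not already completed, and
                      # not scheduled later.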
11091                 while node_stack:
11092                         node = node_stack.pop()
11093                         if node in traversed_nodes:
11094                                 continue
11095                         traversed_nodes.add(node)
11096                         if not ((node.installed and node.operation == "nomerge") or \
11097                                 (node.operation == "uninstall" and \
11098                                 node not in direct_deps) or \
11099                                 node in completed_tasks or \
11100                                 node in later):
11101                                 dependent = True
11102                                 break
11103                         node_stack.extend(graph.child_nodes(node))
11104
11105                 return dependent
11106
11107         def _allocate_config(self, root):
11108                 """
11109                 Allocate a unique config instance for a task in order
11110                 to prevent interference between parallel tasks.
11111                 """
11112                 if self._config_pool[root]:
11113                         temp_settings = self._config_pool[root].pop()
11114                 else:
11115                         temp_settings = portage.config(clone=self.pkgsettings[root])
11116                 # Since config.setcpv() isn't guaranteed to call config.reset() for
11117                 # performance reasons, call it here to make sure all settings from the
11118                 # previous package get flushed out (such as PORTAGE_LOG_FILE).
11119                 temp_settings.reload()
11120                 temp_settings.reset()
11121                 return temp_settings
11122
11123         def _deallocate_config(self, settings):
11124                 self._config_pool[settings["ROOT"]].append(settings)
11125
11126         def _main_loop(self):
11127
11128                 # Only allow 1 job max if a restart is scheduled
11129                 # due to portage update.
11130                 if self._is_restart_scheduled() or \
11131                         self._opts_no_background.intersection(self.myopts):
11132                         self._set_max_jobs(1)
11133
11134                 merge_queue = self._task_queues.merge
11135
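                      # First drain the scheduler until it stops scheduling new tasks,
                      # then keep polling until all running jobs and queued merges have
                      # finished.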
11136                 while self._schedule():
11137                         if self._poll_event_handlers:
11138                                 self._poll_loop()
11139
11140                 while True:
11141                         self._schedule()
11142                         if not (self._jobs or merge_queue):
11143                                 break
11144                         if self._poll_event_handlers:
11145                                 self._poll_loop()
11146
11147         def _keep_scheduling(self):
11148                 return bool(self._pkg_queue and \
11149                         not (self._failed_pkgs and not self._build_opts.fetchonly))
11150
11151         def _schedule_tasks(self):
11152
11153                 # When the number of jobs drops to zero, process all waiting merges.
11154                 if not self._jobs and self._merge_wait_queue:
11155                         for task in self._merge_wait_queue:
11156                                 task.addExitListener(self._merge_wait_exit_handler)
11157                                 self._task_queues.merge.add(task)
11158                         self._status_display.merges = len(self._task_queues.merge)
11159                         self._merge_wait_scheduled.extend(self._merge_wait_queue)
11160                         del self._merge_wait_queue[:]
11161
11162                 self._schedule_tasks_imp()
11163                 self._status_display.display()
11164
11165                 state_change = 0
11166                 for q in self._task_queues.values():
11167                         if q.schedule():
11168                                 state_change += 1
11169
11170                 # Cancel prefetchers if they're the only reason
11171                 # the main poll loop is still running.
11172                 if self._failed_pkgs and not self._build_opts.fetchonly and \
11173                         not (self._jobs or self._task_queues.merge) and \
11174                         self._task_queues.fetch:
11175                         self._task_queues.fetch.clear()
11176                         state_change += 1
11177
11178                 if state_change:
11179                         self._schedule_tasks_imp()
11180                         self._status_display.display()
11181
11182                 return self._keep_scheduling()
11183
11184         def _job_delay(self):
11185                 """
11186                 @rtype: bool
11187                 @returns: True if job scheduling should be delayed, False otherwise.
11188                 """
11189
11190                 if self._jobs and self._max_load is not None:
11191
11192                         current_time = time.time()
11193
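                              # Back off between job starts: the delay grows with the
                              # number of running jobs (factor * jobs ** exp), capped at
                              # _job_delay_max, and a new job is delayed until that much
                              # time has passed since the previous job start.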
11194                         delay = self._job_delay_factor * self._jobs ** self._job_delay_exp
11195                         if delay > self._job_delay_max:
11196                                 delay = self._job_delay_max
11197                         if (current_time - self._previous_job_start_time) < delay:
11198                                 return True
11199
11200                 return False
11201
11202         def _schedule_tasks_imp(self):
11203                 """
11204                 @rtype: bool
11205                 @returns: True if state changed, False otherwise.
11206                 """
11207
11208                 state_change = 0
11209
11210                 while True:
11211
11212                         if not self._keep_scheduling():
11213                                 return bool(state_change)
11214
11215                         if self._choose_pkg_return_early or \
11216                                 self._merge_wait_scheduled or \
11217                                 (self._jobs and self._unsatisfied_system_deps) or \
11218                                 not self._can_add_job() or \
11219                                 self._job_delay():
11220                                 return bool(state_change)
11221
11222                         pkg = self._choose_pkg()
11223                         if pkg is None:
11224                                 return bool(state_change)
11225
11226                         state_change += 1
11227
11228                         if not pkg.installed:
11229                                 self._pkg_count.curval += 1
11230
11231                         task = self._task(pkg)
11232
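                              # Dispatch the task: packages that are already installed
                              # bypass the job queues and go straight to the merge queue,
                              # built packages become extraction jobs (_extract_exit),
                              # and ebuilds become build jobs (_build_exit).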
11233                         if pkg.installed:
11234                                 merge = PackageMerge(merge=task)
11235                                 merge.addExitListener(self._merge_exit)
11236                                 self._task_queues.merge.add(merge)
11237
11238                         elif pkg.built:
11239                                 self._jobs += 1
11240                                 self._previous_job_start_time = time.time()
11241                                 self._status_display.running = self._jobs
11242                                 task.addExitListener(self._extract_exit)
11243                                 self._task_queues.jobs.add(task)
11244
11245                         else:
11246                                 self._jobs += 1
11247                                 self._previous_job_start_time = time.time()
11248                                 self._status_display.running = self._jobs
11249                                 task.addExitListener(self._build_exit)
11250                                 self._task_queues.jobs.add(task)
11251
11252                 return bool(state_change)
11253
11254         def _task(self, pkg):
11255
11256                 pkg_to_replace = None
11257                 if pkg.operation != "uninstall":
11258                         vardb = pkg.root_config.trees["vartree"].dbapi
11259                         previous_cpv = vardb.match(pkg.slot_atom)
11260                         if previous_cpv:
11261                                 previous_cpv = previous_cpv.pop()
11262                                 pkg_to_replace = self._pkg(previous_cpv,
11263                                         "installed", pkg.root_config, installed=True)
11264
11265                 task = MergeListItem(args_set=self._args_set,
11266                         background=self._background, binpkg_opts=self._binpkg_opts,
11267                         build_opts=self._build_opts,
11268                         config_pool=self._ConfigPool(pkg.root,
11269                         self._allocate_config, self._deallocate_config),
11270                         emerge_opts=self.myopts,
11271                         find_blockers=self._find_blockers(pkg), logger=self._logger,
11272                         mtimedb=self._mtimedb, pkg=pkg, pkg_count=self._pkg_count.copy(),
11273                         pkg_to_replace=pkg_to_replace,
11274                         prefetcher=self._prefetchers.get(pkg),
11275                         scheduler=self._sched_iface,
11276                         settings=self._allocate_config(pkg.root),
11277                         statusMessage=self._status_msg,
11278                         world_atom=self._world_atom)
11279
11280                 return task
11281
11282         def _failed_pkg_msg(self, failed_pkg, action, preposition):
11283                 pkg = failed_pkg.pkg
11284                 msg = "%s to %s %s" % \
11285                         (bad("Failed"), action, colorize("INFORM", pkg.cpv))
11286                 if pkg.root != "/":
11287                         msg += " %s %s" % (preposition, pkg.root)
11288
11289                 log_path = self._locate_failure_log(failed_pkg)
11290                 if log_path is not None:
11291                         msg += ", Log file:"
11292                 self._status_msg(msg)
11293
11294                 if log_path is not None:
11295                         self._status_msg(" '%s'" % (colorize("INFORM", log_path),))
11296
11297         def _status_msg(self, msg):
11298                 """
11299                 Display a brief status message (no newlines) in the status display.
11300                 This is called by tasks to provide feedback to the user. This
11301                 delegates the responsibility of generating \r and \n control characters
11302                 to the status display, guaranteeing that lines are created or erased
11303                 when necessary and appropriate.
11304
11305                 @type msg: str
11306                 @param msg: a brief status message (no newlines allowed)
11307                 """
11308                 if not self._background:
11309                         writemsg_level("\n")
11310                 self._status_display.displayMessage(msg)
11311
11312         def _save_resume_list(self):
11313                 """
11314                 Do this before verifying the ebuild Manifests since it might
11315                 be possible for the user to use --resume --skipfirst to get past
11316                 a non-essential package with a broken digest.
11317                 """
11318                 mtimedb = self._mtimedb
11319                 mtimedb["resume"]["mergelist"] = [list(x) \
11320                         for x in self._mergelist \
11321                         if isinstance(x, Package) and x.operation == "merge"]
11322
11323                 mtimedb.commit()
11324
11325         def _calc_resume_list(self):
11326                 """
11327                 Use the current resume list to calculate a new one,
11328                 dropping any packages with unsatisfied deps.
11329                 @rtype: bool
11330                 @returns: True if successful, False otherwise.
11331                 """
11332                 print colorize("GOOD", "*** Resuming merge...")
11333
11334                 if self._show_list():
11335                         if "--tree" in self.myopts:
11336                                 portage.writemsg_stdout("\n" + \
11337                                         darkgreen("These are the packages that " + \
11338                                         "would be merged, in reverse order:\n\n"))
11339
11340                         else:
11341                                 portage.writemsg_stdout("\n" + \
11342                                         darkgreen("These are the packages that " + \
11343                                         "would be merged, in order:\n\n"))
11344
11345                 show_spinner = "--quiet" not in self.myopts and \
11346                         "--nodeps" not in self.myopts
11347
11348                 if show_spinner:
11349                         print "Calculating dependencies  ",
11350
11351                 myparams = create_depgraph_params(self.myopts, None)
11352                 success = False
11353                 e = None
11354                 try:
11355                         success, mydepgraph, dropped_tasks = resume_depgraph(
11356                                 self.settings, self.trees, self._mtimedb, self.myopts,
11357                                 myparams, self._spinner)
11358                 except depgraph.UnsatisfiedResumeDep, exc:
11359                         # rename variable to avoid python-3.0 error:
11360                         # SyntaxError: can not delete variable 'e' referenced in nested
11361                         #              scope
11362                         e = exc
11363                         mydepgraph = e.depgraph
11364                         dropped_tasks = set()
11365
11366                 if show_spinner:
11367                         print "\b\b... done!"
11368
11369                 if e is not None:
11370                         def unsatisfied_resume_dep_msg():
11371                                 mydepgraph.display_problems()
11372                                 out = portage.output.EOutput()
11373                                 out.eerror("One or more packages are either masked or " + \
11374                                         "have missing dependencies:")
11375                                 out.eerror("")
11376                                 indent = "  "
11377                                 show_parents = set()
11378                                 for dep in e.value:
11379                                         if dep.parent in show_parents:
11380                                                 continue
11381                                         show_parents.add(dep.parent)
11382                                         if dep.atom is None:
11383                                                 out.eerror(indent + "Masked package:")
11384                                                 out.eerror(2 * indent + str(dep.parent))
11385                                                 out.eerror("")
11386                                         else:
11387                                                 out.eerror(indent + str(dep.atom) + " pulled in by:")
11388                                                 out.eerror(2 * indent + str(dep.parent))
11389                                                 out.eerror("")
11390                                 msg = "The resume list contains packages " + \
11391                                         "that are either masked or have " + \
11392                                         "unsatisfied dependencies. " + \
11393                                         "Please restart/continue " + \
11394                                         "the operation manually, or use --skipfirst " + \
11395                                         "to skip the first package in the list and " + \
11396                                         "any other packages that may be " + \
11397                                         "masked or have missing dependencies."
11398                                 for line in textwrap.wrap(msg, 72):
11399                                         out.eerror(line)
11400                         self._post_mod_echo_msgs.append(unsatisfied_resume_dep_msg)
11401                         return False
11402
11403                 if success and self._show_list():
11404                         mylist = mydepgraph.altlist()
11405                         if mylist:
11406                                 if "--tree" in self.myopts:
11407                                         mylist.reverse()
11408                                 mydepgraph.display(mylist, favorites=self._favorites)
11409
11410                 if not success:
11411                         self._post_mod_echo_msgs.append(mydepgraph.display_problems)
11412                         return False
11413                 mydepgraph.display_problems()
11414
11415                 mylist = mydepgraph.altlist()
11416                 mydepgraph.break_refs(mylist)
11417                 mydepgraph.break_refs(dropped_tasks)
11418                 self._mergelist = mylist
11419                 self._set_digraph(mydepgraph.schedulerGraph())
11420
11421                 msg_width = 75
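                      # Report each merge task that was dropped from the resume list
                      # via elog and record it as a failed package so that it shows
                      # up in the final summary.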
11422                 for task in dropped_tasks:
11423                         if not (isinstance(task, Package) and task.operation == "merge"):
11424                                 continue
11425                         pkg = task
11426                         msg = "emerge --keep-going:" + \
11427                                 " %s" % (pkg.cpv,)
11428                         if pkg.root != "/":
11429                                 msg += " for %s" % (pkg.root,)
11430                         msg += " dropped due to unsatisfied dependency."
11431                         for line in textwrap.wrap(msg, msg_width):
11432                                 eerror(line, phase="other", key=pkg.cpv)
11433                         settings = self.pkgsettings[pkg.root]
11434                         # Ensure that log collection from $T is disabled inside
11435                         # elog_process(), since any logs that might exist are
11436                         # not valid here.
11437                         settings.pop("T", None)
11438                         portage.elog.elog_process(pkg.cpv, settings)
11439                         self._failed_pkgs_all.append(self._failed_pkg(pkg=pkg))
11440
11441                 return True
11442
11443         def _show_list(self):
11444                 myopts = self.myopts
11445                 if "--quiet" not in myopts and \
11446                         ("--ask" in myopts or "--tree" in myopts or \
11447                         "--verbose" in myopts):
11448                         return True
11449                 return False
11450
11451         def _world_atom(self, pkg):
11452                 """
11453                 Add the package to the world file, but only if
11454                 it's supposed to be added. Otherwise, do nothing.
11455                 """
11456
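                      # With any of these options the requested packages themselves
                      # are not merged (or, with --oneshot, the user explicitly asked
                      # not to update world), so skip the world file update.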
11457                 if set(("--buildpkgonly", "--fetchonly",
11458                         "--fetch-all-uri",
11459                         "--oneshot", "--onlydeps",
11460                         "--pretend")).intersection(self.myopts):
11461                         return
11462
11463                 if pkg.root != self.target_root:
11464                         return
11465
11466                 args_set = self._args_set
11467                 if not args_set.findAtomForPackage(pkg):
11468                         return
11469
11470                 logger = self._logger
11471                 pkg_count = self._pkg_count
11472                 root_config = pkg.root_config
11473                 world_set = root_config.sets["world"]
11474                 world_locked = False
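                      # Lock the world file (if the set implementation supports
                      # locking) so that concurrent emerge instances don't clobber
                      # each other's updates.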
11475                 if hasattr(world_set, "lock"):
11476                         world_set.lock()
11477                         world_locked = True
11478
11479                 try:
11480                         if hasattr(world_set, "load"):
11481                                 world_set.load() # maybe it's changed on disk
11482
11483                         atom = create_world_atom(pkg, args_set, root_config)
11484                         if atom:
11485                                 if hasattr(world_set, "add"):
11486                                         self._status_msg(('Recording %s in "world" ' + \
11487                                                 'favorites file...') % atom)
11488                                         logger.log(" === (%s of %s) Updating world file (%s)" % \
11489                                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv))
11490                                         world_set.add(atom)
11491                                 else:
11492                                         writemsg_level('\n!!! Unable to record %s in "world"\n' % \
11493                                                 (atom,), level=logging.WARN, noiselevel=-1)
11494                 finally:
11495                         if world_locked:
11496                                 world_set.unlock()
11497
11498         def _pkg(self, cpv, type_name, root_config, installed=False):
11499                 """
11500                 Get a package instance from the cache, or create a new
11501                 one if necessary. Raises KeyError from aux_get if it
11502                 fails for some reason (package does not exist or is
11503                 corrupt).
11504                 """
11505                 operation = "merge"
11506                 if installed:
11507                         operation = "nomerge"
11508
11509                 if self._digraph is not None:
11510                         # Reuse existing instance when available.
11511                         pkg = self._digraph.get(
11512                                 (type_name, root_config.root, cpv, operation))
11513                         if pkg is not None:
11514                                 return pkg
11515
11516                 tree_type = depgraph.pkg_tree_map[type_name]
11517                 db = root_config.trees[tree_type].dbapi
11518                 db_keys = list(self.trees[root_config.root][
11519                         tree_type].dbapi._aux_cache_keys)
11520                 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
11521                 pkg = Package(cpv=cpv, metadata=metadata,
11522                         root_config=root_config, installed=installed)
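                      # For ebuilds, USE and CHOST are derived from the current
                      # configuration rather than from repository metadata, so
                      # compute them here via setcpv().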
11523                 if type_name == "ebuild":
11524                         settings = self.pkgsettings[root_config.root]
11525                         settings.setcpv(pkg)
11526                         pkg.metadata["USE"] = settings["PORTAGE_USE"]
11527                         pkg.metadata['CHOST'] = settings.get('CHOST', '')
11528
11529                 return pkg
11530
11531 class MetadataRegen(PollScheduler):
11532
11533         def __init__(self, portdb, max_jobs=None, max_load=None):
11534                 PollScheduler.__init__(self)
11535                 self._portdb = portdb
11536
11537                 if max_jobs is None:
11538                         max_jobs = 1
11539
11540                 self._max_jobs = max_jobs
11541                 self._max_load = max_load
11542                 self._sched_iface = self._sched_iface_class(
11543                         register=self._register,
11544                         schedule=self._schedule_wait,
11545                         unregister=self._unregister)
11546
11547                 self._valid_pkgs = set()
11548                 self._process_iter = self._iter_metadata_processes()
11549                 self.returncode = os.EX_OK
11550                 self._error_count = 0
11551
11552         def _iter_metadata_processes(self):
11553                 portdb = self._portdb
11554                 valid_pkgs = self._valid_pkgs
11555                 every_cp = portdb.cp_all()
11556                 every_cp.sort(reverse=True)
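                      # Reverse-sort so that pop(), which removes from the end of
                      # the list, yields packages in ordinary ascending order.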
11557
11558                 while every_cp:
11559                         cp = every_cp.pop()
11560                         portage.writemsg_stdout("Processing %s\n" % cp)
11561                         cpv_list = portdb.cp_list(cp)
11562                         for cpv in cpv_list:
11563                                 valid_pkgs.add(cpv)
11564                                 ebuild_path, repo_path = portdb.findname2(cpv)
11565                                 metadata_process = portdb._metadata_process(
11566                                         cpv, ebuild_path, repo_path)
11567                                 if metadata_process is None:
11568                                         continue
11569                                 yield metadata_process
11570
11571         def run(self):
11572
11573                 portdb = self._portdb
11574                 from portage.cache.cache_errors import CacheError
11575                 dead_nodes = {}
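                      # Assume every existing cache entry is stale to begin with;
                      # entries that still have a corresponding ebuild are removed
                      # from dead_nodes below, and whatever remains afterwards is
                      # purged from the cache.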
11576
11577                 for mytree in portdb.porttrees:
11578                         try:
11579                                 dead_nodes[mytree] = set(portdb.auxdb[mytree].iterkeys())
11580                         except CacheError, e:
11581                                 portage.writemsg("Error listing cache entries for " + \
11582                                         "'%s': %s, continuing...\n" % (mytree, e), noiselevel=-1)
11583                                 del e
11584                                 dead_nodes = None
11585                                 break
11586
11587                 while self._schedule():
11588                         self._poll_loop()
11589
11590                 while self._jobs:
11591                         self._poll_loop()
11592
11593                 if dead_nodes:
11594                         for y in self._valid_pkgs:
11595                                 for mytree in portdb.porttrees:
11596                                         if portdb.findname2(y, mytree=mytree)[0]:
11597                                                 dead_nodes[mytree].discard(y)
11598
11599                         for mytree, nodes in dead_nodes.iteritems():
11600                                 auxdb = portdb.auxdb[mytree]
11601                                 for y in nodes:
11602                                         try:
11603                                                 del auxdb[y]
11604                                         except (KeyError, CacheError):
11605                                                 pass
11606
11607         def _schedule_tasks(self):
11608                 """
11609                 @rtype: bool
11610                 @returns: True if there may be remaining tasks to schedule,
11611                         False otherwise.
11612                 """
11613                 while self._can_add_job():
11614                         try:
11615                                 metadata_process = self._process_iter.next()
11616                         except StopIteration:
11617                                 return False
11618
11619                         self._jobs += 1
11620                         metadata_process.scheduler = self._sched_iface
11621                         metadata_process.addExitListener(self._metadata_exit)
11622                         metadata_process.start()
11623                 return True
11624
11625         def _metadata_exit(self, metadata_process):
11626                 self._jobs -= 1
11627                 if metadata_process.returncode != os.EX_OK:
11628                         self.returncode = 1
11629                         self._error_count += 1
11630                         self._valid_pkgs.discard(metadata_process.cpv)
11631                         portage.writemsg("Error processing %s, continuing...\n" % \
11632                                 (metadata_process.cpv,))
11633                 self._schedule()
11634
11635 class UninstallFailure(portage.exception.PortageException):
11636         """
11637         An instance of this class is raised by unmerge() when
11638         an uninstallation fails.
11639         """
11640         status = 1
11641         def __init__(self, *pargs):
11642                 portage.exception.PortageException.__init__(self, pargs)
11643                 if pargs:
11644                         self.status = pargs[0]
11645
11646 def unmerge(root_config, myopts, unmerge_action,
11647         unmerge_files, ldpath_mtimes, autoclean=0,
11648         clean_world=1, clean_delay=1, ordered=0, raise_on_error=0,
11649         scheduler=None, writemsg_level=portage.util.writemsg_level):
11650
11651         quiet = "--quiet" in myopts
11652         settings = root_config.settings
11653         sets = root_config.sets
11654         vartree = root_config.trees["vartree"]
11655         candidate_catpkgs=[]
11656         global_unmerge=0
11657         xterm_titles = "notitles" not in settings.features
11658         out = portage.output.EOutput()
11659         pkg_cache = {}
11660         db_keys = list(vartree.dbapi._aux_cache_keys)
11661
11662         def _pkg(cpv):
11663                 pkg = pkg_cache.get(cpv)
11664                 if pkg is None:
11665                         pkg = Package(cpv=cpv, installed=True,
11666                                 metadata=izip(db_keys, vartree.dbapi.aux_get(cpv, db_keys)),
11667                                 root_config=root_config,
11668                                 type_name="installed")
11669                         pkg_cache[cpv] = pkg
11670                 return pkg
11671
11672         vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
11673         try:
11674                 # At least the parent needs to exist for the lock file.
11675                 portage.util.ensure_dirs(vdb_path)
11676         except portage.exception.PortageException:
11677                 pass
11678         vdb_lock = None
11679         try:
11680                 if os.access(vdb_path, os.W_OK):
11681                         vdb_lock = portage.locks.lockdir(vdb_path)
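                      # Build the list of system-profile packages, resolving
                      # virtuals to their installed provider when there is exactly
                      # one; it is used later to warn before unmerging anything
                      # from the system set.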
11682                 realsyslist = sets["system"].getAtoms()
11683                 syslist = []
11684                 for x in realsyslist:
11685                         mycp = portage.dep_getkey(x)
11686                         if mycp in settings.getvirtuals():
11687                                 providers = []
11688                                 for provider in settings.getvirtuals()[mycp]:
11689                                         if vartree.dbapi.match(provider):
11690                                                 providers.append(provider)
11691                                 if len(providers) == 1:
11692                                         syslist.extend(providers)
11693                         else:
11694                                 syslist.append(mycp)
11695         
11696                 mysettings = portage.config(clone=settings)
11697         
11698                 if not unmerge_files:
11699                         if unmerge_action == "unmerge":
11700                                 print
11701                                 print bold("emerge unmerge") + " can only be used with specific package names"
11702                                 print
11703                                 return 0
11704                         else:
11705                                 global_unmerge = 1
11706         
11707                 localtree = vartree
11708                 # process all arguments and add all
11709                 # valid db entries to candidate_catpkgs
11710                 if global_unmerge:
11711                         if not unmerge_files:
11712                                 candidate_catpkgs.extend(vartree.dbapi.cp_all())
11713                 else:
11714                         #we've got command-line arguments
11715                         if not unmerge_files:
11716                                 print "\nNo packages to unmerge have been provided.\n"
11717                                 return 0
11718                         for x in unmerge_files:
11719                                 arg_parts = x.split('/')
11720                                 if x[0] not in [".","/"] and \
11721                                         arg_parts[-1][-7:] != ".ebuild":
11722                                         #possible cat/pkg or dep; treat as such
11723                                         candidate_catpkgs.append(x)
11724                                 elif unmerge_action in ["prune","clean"]:
11725                                         print "\n!!! Prune and clean do not accept individual" + \
11726                                                 " ebuilds as arguments;\n    skipping.\n"
11727                                         continue
11728                                 else:
11729                                         # it appears that the user is specifying an installed
11730                                         # ebuild and we're in "unmerge" mode, so it's ok.
11731                                         if not os.path.exists(x):
11732                                                 print "\n!!! The path '"+x+"' doesn't exist.\n"
11733                                                 return 0
11734         
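                                              # Resolve the argument to an absolute path, strip a
                                              # trailing .ebuild component, and verify that it is a
                                              # valid package directory inside the vdb; the matching
                                              # =cat/pkg-ver atom is then rebuilt from the path
                                              # components below the vdb root.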
11735                                         absx   = os.path.abspath(x)
11736                                         sp_absx = absx.split("/")
11737                                         if sp_absx[-1][-7:] == ".ebuild":
11738                                                 del sp_absx[-1]
11739                                                 absx = "/".join(sp_absx)
11740         
11741                                         sp_absx_len = len(sp_absx)
11742         
11743                                         vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
11744                                         vdb_len  = len(vdb_path)
11745         
11746                                         sp_vdb     = vdb_path.split("/")
11747                                         sp_vdb_len = len(sp_vdb)
11748         
11749                                         if not os.path.exists(absx+"/CONTENTS"):
11750                                                 print "!!! Not a valid db dir: "+str(absx)
11751                                                 return 0
11752         
11753                                         if sp_absx_len <= sp_vdb_len:
11754                                                 # The path is shorter... so it can't be inside the vdb.
11755                                                 print sp_absx
11756                                                 print absx
11757                                                 print "\n!!!",x,"cannot be inside "+ \
11758                                                         vdb_path+"; aborting.\n"
11759                                                 return 0
11760         
11761                                         for idx in range(0,sp_vdb_len):
11762                                                 if idx >= sp_absx_len or sp_vdb[idx] != sp_absx[idx]:
11763                                                         print sp_absx
11764                                                         print absx
11765                                                         print "\n!!!", x, "is not inside "+\
11766                                                                 vdb_path+"; aborting.\n"
11767                                                         return 0
11768         
11769                                         print "="+"/".join(sp_absx[sp_vdb_len:])
11770                                         candidate_catpkgs.append(
11771                                                 "="+"/".join(sp_absx[sp_vdb_len:]))
11772         
11773                 newline=""
11774                 if (not "--quiet" in myopts):
11775                         newline="\n"
11776                 if settings["ROOT"] != "/":
11777                         writemsg_level(darkgreen(newline+ \
11778                                 ">>> Using system located in ROOT tree %s\n" % \
11779                                 settings["ROOT"]))
11780
11781                 if (("--pretend" in myopts) or ("--ask" in myopts)) and \
11782                         not ("--quiet" in myopts):
11783                         writemsg_level(darkgreen(newline+\
11784                                 ">>> These are the packages that would be unmerged:\n"))
11785
11786                 # Preservation of order is required for --depclean and --prune so
11787                 # that dependencies are respected. Use all_selected to eliminate
11788                 # duplicate packages since the same package may be selected by
11789                 # multiple atoms.
11790                 pkgmap = []
11791                 all_selected = set()
11792                 for x in candidate_catpkgs:
11793                         # cycle through all our candidate deps and determine
11794                         # what will and will not get unmerged
11795                         try:
11796                                 mymatch = vartree.dbapi.match(x)
11797                         except portage.exception.AmbiguousPackageName, errpkgs:
11798                                 print "\n\n!!! The short ebuild name \"" + \
11799                                         x + "\" is ambiguous.  Please specify"
11800                                 print "!!! one of the following fully-qualified " + \
11801                                         "ebuild names instead:\n"
11802                                 for i in errpkgs[0]:
11803                                         print "    " + green(i)
11804                                 print
11805                                 sys.exit(1)
11806         
11807                         if not mymatch and x[0] not in "<>=~":
11808                                 mymatch = localtree.dep_match(x)
11809                         if not mymatch:
11810                                 portage.writemsg("\n--- Couldn't find '%s' to %s.\n" % \
11811                                         (x, unmerge_action), noiselevel=-1)
11812                                 continue
11813
11814                         pkgmap.append(
11815                                 {"protected": set(), "selected": set(), "omitted": set()})
11816                         mykey = len(pkgmap) - 1
11817                         if unmerge_action=="unmerge":
11818                                         for y in mymatch:
11819                                                 if y not in all_selected:
11820                                                         pkgmap[mykey]["selected"].add(y)
11821                                                         all_selected.add(y)
11822                         elif unmerge_action == "prune":
11823                                 if len(mymatch) == 1:
11824                                         continue
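                                      # Protect the best installed version (preferring the
                                      # highest counter within a slot) and select all other
                                      # matched versions for removal.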
11825                                 best_version = mymatch[0]
11826                                 best_slot = vartree.getslot(best_version)
11827                                 best_counter = vartree.dbapi.cpv_counter(best_version)
11828                                 for mypkg in mymatch[1:]:
11829                                         myslot = vartree.getslot(mypkg)
11830                                         mycounter = vartree.dbapi.cpv_counter(mypkg)
11831                                         if (myslot == best_slot and mycounter > best_counter) or \
11832                                                 mypkg == portage.best([mypkg, best_version]):
11833                                                 if myslot == best_slot:
11834                                                         if mycounter < best_counter:
11835                                                                 # On slot collision, keep the one with the
11836                                                                 # highest counter since it is the most
11837                                                                 # recently installed.
11838                                                                 continue
11839                                                 best_version = mypkg
11840                                                 best_slot = myslot
11841                                                 best_counter = mycounter
11842                                 pkgmap[mykey]["protected"].add(best_version)
11843                                 pkgmap[mykey]["selected"].update(mypkg for mypkg in mymatch \
11844                                         if mypkg != best_version and mypkg not in all_selected)
11845                                 all_selected.update(pkgmap[mykey]["selected"])
11846                         else:
11847                                 # unmerge_action == "clean"
11848                                 slotmap={}
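                                      # slotmap maps slot -> {counter: cpv}; the highest
                                      # counter in each slot (the most recently merged
                                      # version) is protected, versions not matched by the
                                      # atom are also protected, and the remaining matched
                                      # versions are selected for removal.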
11849                                 for mypkg in mymatch:
11850                                         if unmerge_action == "clean":
11851                                                 myslot = localtree.getslot(mypkg)
11852                                         else:
11853                                                 # since we're pruning, we don't care about slots
11854                                                 # and put all the pkgs in together
11855                                                 myslot = 0
11856                                         if myslot not in slotmap:
11857                                                 slotmap[myslot] = {}
11858                                         slotmap[myslot][localtree.dbapi.cpv_counter(mypkg)] = mypkg
11859
11860                                 for mypkg in vartree.dbapi.cp_list(
11861                                         portage.dep_getkey(mymatch[0])):
11862                                         myslot = vartree.getslot(mypkg)
11863                                         if myslot not in slotmap:
11864                                                 slotmap[myslot] = {}
11865                                         slotmap[myslot][vartree.dbapi.cpv_counter(mypkg)] = mypkg
11866
11867                                 for myslot in slotmap:
11868                                         counterkeys = slotmap[myslot].keys()
11869                                         if not counterkeys:
11870                                                 continue
11871                                         counterkeys.sort()
11872                                         pkgmap[mykey]["protected"].add(
11873                                                 slotmap[myslot][counterkeys[-1]])
11874                                         del counterkeys[-1]
11875
11876                                         for counter in counterkeys[:]:
11877                                                 mypkg = slotmap[myslot][counter]
11878                                                 if mypkg not in mymatch:
11879                                                         counterkeys.remove(counter)
11880                                                         pkgmap[mykey]["protected"].add(
11881                                                                 slotmap[myslot][counter])
11882
11883                                         #be pretty and get them in order of merge:
11884                                         for ckey in counterkeys:
11885                                                 mypkg = slotmap[myslot][ckey]
11886                                                 if mypkg not in all_selected:
11887                                                         pkgmap[mykey]["selected"].add(mypkg)
11888                                                         all_selected.add(mypkg)
11889                                         # ok, now the last-merged package
11890                                         # is protected, and the rest are selected
11891                 numselected = len(all_selected)
11892                 if global_unmerge and not numselected:
11893                         portage.writemsg_stdout("\n>>> No outdated packages were found on your system.\n")
11894                         return 0
11895         
11896                 if not numselected:
11897                         portage.writemsg_stdout(
11898                                 "\n>>> No packages selected for removal by " + \
11899                                 unmerge_action + "\n")
11900                         return 0
11901         finally:
11902                 if vdb_lock:
11903                         vartree.dbapi.flush_cache()
11904                         portage.locks.unlockdir(vdb_lock)
11905         
11906         from portage.sets.base import EditablePackageSet
11907         
11908         # generate a list of package sets that are directly or indirectly listed in "world",
11909         # as there is no persistent list of "installed" sets
11910         installed_sets = ["world"]
11911         stop = False
11912         pos = 0
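              # Iteratively expand nested @set references so that sets which are
              # only indirectly referenced from "world" are treated as installed
              # as well.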
11913         while not stop:
11914                 stop = True
11915                 pos = len(installed_sets)
11916                 for s in installed_sets[pos - 1:]:
11917                         if s not in sets:
11918                                 continue
11919                         candidates = [x[len(SETPREFIX):] for x in sets[s].getNonAtoms() if x.startswith(SETPREFIX)]
11920                         if candidates:
11921                                 stop = False
11922                                 installed_sets += candidates
11923         installed_sets = [x for x in installed_sets if x not in root_config.setconfig.active]
11924         del stop, pos
11925
11926         # we don't want to unmerge packages that are still listed in user-editable package sets
11927         # listed in "world" as they would be remerged on the next update of "world" or the 
11928         # relevant package sets.
11929         unknown_sets = set()
11930         for cp in xrange(len(pkgmap)):
11931                 for cpv in pkgmap[cp]["selected"].copy():
11932                         try:
11933                                 pkg = _pkg(cpv)
11934                         except KeyError:
11935                                 # It could have been uninstalled
11936                                 # by a concurrent process.
11937                                 continue
11938
11939                         if unmerge_action != "clean" and \
11940                                 root_config.root == "/" and \
11941                                 portage.match_from_list(
11942                                 portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
11943                                 msg = ("Not unmerging package %s since there is no valid " + \
11944                                 "reason for portage to unmerge itself.") % (pkg.cpv,)
11945                                 for line in textwrap.wrap(msg, 75):
11946                                         out.eerror(line)
11947                                 # adjust pkgmap so the display output is correct
11948                                 pkgmap[cp]["selected"].remove(cpv)
11949                                 all_selected.remove(cpv)
11950                                 pkgmap[cp]["protected"].add(cpv)
11951                                 continue
11952
11953                         parents = []
11954                         for s in installed_sets:
11955                                 # skip sets that the user requested to unmerge, and skip world 
11956                                 # unless we're unmerging a package set (as the package would be 
11957                                 # removed from "world" later on)
11958                                 if s in root_config.setconfig.active or (s == "world" and not root_config.setconfig.active):
11959                                         continue
11960
11961                                 if s not in sets:
11962                                         if s in unknown_sets:
11963                                                 continue
11964                                         unknown_sets.add(s)
11965                                         out = portage.output.EOutput()
11966                                         out.eerror(("Unknown set '@%s' in " + \
11967                                                 "%svar/lib/portage/world_sets") % \
11968                                                 (s, root_config.root))
11969                                         continue
11970
11971                                 # only check instances of EditablePackageSet as other classes are generally used for
11972                                 # special purposes and can be ignored here (and are usually generated dynamically, so the
11973                                 # user can't do much about them anyway)
11974                                 if isinstance(sets[s], EditablePackageSet):
11975
11976                                         # This is derived from a snippet of code in the
11977                                         # depgraph._iter_atoms_for_pkg() method.
11978                                         for atom in sets[s].iterAtomsForPackage(pkg):
11979                                                 inst_matches = vartree.dbapi.match(atom)
11980                                                 inst_matches.reverse() # descending order
11981                                                 higher_slot = None
11982                                                 for inst_cpv in inst_matches:
11983                                                         try:
11984                                                                 inst_pkg = _pkg(inst_cpv)
11985                                                         except KeyError:
11986                                                                 # It could have been uninstalled
11987                                                                 # by a concurrent process.
11988                                                                 continue
11989
11990                                                         if inst_pkg.cp != atom.cp:
11991                                                                 continue
11992                                                         if pkg >= inst_pkg:
11993                                                                 # This is descending order, and we're not
11994                                                                 # interested in any versions <= pkg given.
11995                                                                 break
11996                                                         if pkg.slot_atom != inst_pkg.slot_atom:
11997                                                                 higher_slot = inst_pkg
11998                                                                 break
11999                                                 if higher_slot is None:
12000                                                         parents.append(s)
12001                                                         break
12002                         if parents:
12003                                 #print colorize("WARN", "Package %s is going to be unmerged," % cpv)
12004                                 #print colorize("WARN", "but still listed in the following package sets:")
12005                                 #print "    %s\n" % ", ".join(parents)
12006                                 print colorize("WARN", "Not unmerging package %s as it is" % cpv)
12007                                 print colorize("WARN", "still referenced by the following package sets:")
12008                                 print "    %s\n" % ", ".join(parents)
12009                                 # adjust pkgmap so the display output is correct
12010                                 pkgmap[cp]["selected"].remove(cpv)
12011                                 all_selected.remove(cpv)
12012                                 pkgmap[cp]["protected"].add(cpv)
12013         
12014         del installed_sets
12015
12016         numselected = len(all_selected)
12017         if not numselected:
12018                 writemsg_level(
12019                         "\n>>> No packages selected for removal by " + \
12020                         unmerge_action + "\n")
12021                 return 0
12022
12023         # Unmerge order only matters in some cases
12024         if not ordered:
12025                 unordered = {}
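                      # Group the per-atom entries by ${CATEGORY}/${PN} so that each
                      # package only shows up once in the display when unmerge order
                      # is irrelevant.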
12026                 for d in pkgmap:
12027                         selected = d["selected"]
12028                         if not selected:
12029                                 continue
12030                         cp = portage.cpv_getkey(iter(selected).next())
12031                         cp_dict = unordered.get(cp)
12032                         if cp_dict is None:
12033                                 cp_dict = {}
12034                                 unordered[cp] = cp_dict
12035                                 for k in d:
12036                                         cp_dict[k] = set()
12037                         for k, v in d.iteritems():
12038                                 cp_dict[k].update(v)
12039                 pkgmap = [unordered[cp] for cp in sorted(unordered)]
12040
12041         for x in xrange(len(pkgmap)):
12042                 selected = pkgmap[x]["selected"]
12043                 if not selected:
12044                         continue
12045                 for mytype, mylist in pkgmap[x].iteritems():
12046                         if mytype == "selected":
12047                                 continue
12048                         mylist.difference_update(all_selected)
12049                 cp = portage.cpv_getkey(iter(selected).next())
12050                 for y in localtree.dep_match(cp):
12051                         if y not in pkgmap[x]["omitted"] and \
12052                                 y not in pkgmap[x]["selected"] and \
12053                                 y not in pkgmap[x]["protected"] and \
12054                                 y not in all_selected:
12055                                 pkgmap[x]["omitted"].add(y)
12056                 if global_unmerge and not pkgmap[x]["selected"]:
12057                         #avoid cluttering the preview printout with stuff that isn't getting unmerged
12058                         continue
12059                 if not (pkgmap[x]["protected"] or pkgmap[x]["omitted"]) and cp in syslist:
12060                         writemsg_level(colorize("BAD","\a\n\n!!! " + \
12061                                 "'%s' is part of your system profile.\n" % cp),
12062                                 level=logging.WARNING, noiselevel=-1)
12063                         writemsg_level(colorize("WARN","\a!!! Unmerging it may " + \
12064                                 "be damaging to your system.\n\n"),
12065                                 level=logging.WARNING, noiselevel=-1)
12066                         if clean_delay and "--pretend" not in myopts and "--ask" not in myopts:
12067                                 countdown(int(settings["EMERGE_WARNING_DELAY"]),
12068                                         colorize("UNMERGE_WARN", "Press Ctrl-C to Stop"))
12069                 if not quiet:
12070                         writemsg_level("\n %s\n" % (bold(cp),), noiselevel=-1)
12071                 else:
12072                         writemsg_level(bold(cp) + ": ", noiselevel=-1)
12073                 for mytype in ["selected","protected","omitted"]:
12074                         if not quiet:
12075                                 writemsg_level((mytype + ": ").rjust(14), noiselevel=-1)
12076                         if pkgmap[x][mytype]:
12077                                 sorted_pkgs = [portage.catpkgsplit(mypkg)[1:] for mypkg in pkgmap[x][mytype]]
12078                                 sorted_pkgs.sort(key=cmp_sort_key(portage.pkgcmp))
12079                                 for pn, ver, rev in sorted_pkgs:
12080                                         if rev == "r0":
12081                                                 myversion = ver
12082                                         else:
12083                                                 myversion = ver + "-" + rev
12084                                         if mytype == "selected":
12085                                                 writemsg_level(
12086                                                         colorize("UNMERGE_WARN", myversion + " "),
12087                                                         noiselevel=-1)
12088                                         else:
12089                                                 writemsg_level(
12090                                                         colorize("GOOD", myversion + " "), noiselevel=-1)
12091                         else:
12092                                 writemsg_level("none ", noiselevel=-1)
12093                         if not quiet:
12094                                 writemsg_level("\n", noiselevel=-1)
12095                 if quiet:
12096                         writemsg_level("\n", noiselevel=-1)
12097
12098         writemsg_level("\n>>> " + colorize("UNMERGE_WARN", "'Selected'") + \
12099                 " packages are slated for removal.\n")
12100         writemsg_level(">>> " + colorize("GOOD", "'Protected'") + \
12101                         " and " + colorize("GOOD", "'omitted'") + \
12102                         " packages will not be removed.\n\n")
12103
12104         if "--pretend" in myopts:
12105                 #we're done... return
12106                 return 0
12107         if "--ask" in myopts:
12108                 if userquery("Would you like to unmerge these packages?")=="No":
12109                         # enter pretend mode for correct formatting of results
12110                         myopts["--pretend"] = True
12111                         print
12112                         print "Quitting."
12113                         print
12114                         return 0
12115         #the real unmerging begins, after a short delay....
12116         if clean_delay and not autoclean:
12117                 countdown(int(settings["CLEAN_DELAY"]), ">>> Unmerging")
12118
12119         for x in xrange(len(pkgmap)):
12120                 for y in pkgmap[x]["selected"]:
12121                         writemsg_level(">>> Unmerging "+y+"...\n", noiselevel=-1)
12122                         emergelog(xterm_titles, "=== Unmerging... ("+y+")")
12123                         mysplit = y.split("/")
12124                         #unmerge...
12125                         retval = portage.unmerge(mysplit[0], mysplit[1], settings["ROOT"],
12126                                 mysettings, unmerge_action not in ["clean","prune"],
12127                                 vartree=vartree, ldpath_mtimes=ldpath_mtimes,
12128                                 scheduler=scheduler)
12129
12130                         if retval != os.EX_OK:
12131                                 emergelog(xterm_titles, " !!! unmerge FAILURE: "+y)
12132                                 if raise_on_error:
12133                                         raise UninstallFailure(retval)
12134                                 sys.exit(retval)
12135                         else:
12136                                 if clean_world and hasattr(sets["world"], "cleanPackage"):
12137                                         sets["world"].cleanPackage(vartree.dbapi, y)
12138                                 emergelog(xterm_titles, " >>> unmerge success: "+y)
12139         if clean_world and hasattr(sets["world"], "remove"):
12140                 for s in root_config.setconfig.active:
12141                         sets["world"].remove(SETPREFIX+s)
12142         return 1
12143
12144 def chk_updated_info_files(root, infodirs, prev_mtimes, retval):
12145
12146         if os.path.exists("/usr/bin/install-info"):
12147                 out = portage.output.EOutput()
12148                 regen_infodirs=[]
12149                 for z in infodirs:
12150                         if z=='':
12151                                 continue
12152                         inforoot=normpath(root+z)
12153                         if os.path.isdir(inforoot):
12154                                 infomtime = long(os.stat(inforoot).st_mtime)
12155                                 if inforoot not in prev_mtimes or \
12156                                         prev_mtimes[inforoot] != infomtime:
12157                                                 regen_infodirs.append(inforoot)
12158
12159                 if not regen_infodirs:
12160                         portage.writemsg_stdout("\n")
12161                         out.einfo("GNU info directory index is up-to-date.")
12162                 else:
12163                         portage.writemsg_stdout("\n")
12164                         out.einfo("Regenerating GNU info directory index...")
12165
12166                         dir_extensions = ("", ".gz", ".bz2")
12167                         icount=0
12168                         badcount=0
12169                         errmsg = ""
12170                         for inforoot in regen_infodirs:
12171                                 if inforoot=='':
12172                                         continue
12173
12174                                 if not os.path.isdir(inforoot) or \
12175                                         not os.access(inforoot, os.W_OK):
12176                                         continue
12177
12178                                 file_list = os.listdir(inforoot)
12179                                 file_list.sort()
12180                                 dir_file = os.path.join(inforoot, "dir")
12181                                 moved_old_dir = False
12182                                 processed_count = 0
12183                                 for x in file_list:
12184                                         if x.startswith(".") or \
12185                                                 os.path.isdir(os.path.join(inforoot, x)):
12186                                                 continue
12187                                         if x.startswith("dir"):
12188                                                 skip = False
12189                                                 for ext in dir_extensions:
12190                                                         if x == "dir" + ext or \
12191                                                                 x == "dir" + ext + ".old":
12192                                                                 skip = True
12193                                                                 break
12194                                                 if skip:
12195                                                         continue
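                                              # Before processing the first info file in this
                                              # directory, move any existing 'dir' index (and its
                                              # compressed variants) out of the way so that
                                              # install-info regenerates it from scratch.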
12196                                         if processed_count == 0:
12197                                                 for ext in dir_extensions:
12198                                                         try:
12199                                                                 os.rename(dir_file + ext, dir_file + ext + ".old")
12200                                                                 moved_old_dir = True
12201                                                         except EnvironmentError, e:
12202                                                                 if e.errno != errno.ENOENT:
12203                                                                         raise
12204                                                                 del e
12205                                         processed_count += 1
12206                                         myso=commands.getstatusoutput("LANG=C LANGUAGE=C /usr/bin/install-info --dir-file="+inforoot+"/dir "+inforoot+"/"+x)[1]
12207                                         existsstr="already exists, for file `"
12208                                         if myso!="":
12209                                                 if re.search(existsstr,myso):
12210                                                         # Already exists... Don't increment the count for this.
12211                                                         pass
12212                                                 elif myso[:44]=="install-info: warning: no info dir entry in ":
12213                                                         # This info file doesn't contain a DIR-header: install-info produces this
12214                                                         # (harmless) warning (the --quiet switch doesn't seem to work).
12215                                                         # Don't increment the count for this.
12216                                                         pass
12217                                                 else:
12218                                                         badcount=badcount+1
12219                                                         errmsg += myso + "\n"
12220                                         icount=icount+1
12221
12222                                 if moved_old_dir and not os.path.exists(dir_file):
12223                                         # We didn't generate a new dir file, so put the old file
12224                                         # back where it was originally found.
12225                                         for ext in dir_extensions:
12226                                                 try:
12227                                                         os.rename(dir_file + ext + ".old", dir_file + ext)
12228                                                 except EnvironmentError, e:
12229                                                         if e.errno != errno.ENOENT:
12230                                                                 raise
12231                                                         del e
12232
12233                                 # Clean up dir.old cruft so that it doesn't prevent
12234                                 # unmerge of otherwise empty directories.
12235                                 for ext in dir_extensions:
12236                                         try:
12237                                                 os.unlink(dir_file + ext + ".old")
12238                                         except EnvironmentError, e:
12239                                                 if e.errno != errno.ENOENT:
12240                                                         raise
12241                                                 del e
12242
12243                                 #update mtime so we can potentially avoid regenerating.
12244                                 prev_mtimes[inforoot] = long(os.stat(inforoot).st_mtime)
12245
12246                         if badcount:
12247                                 out.eerror("Processed %d info files; %d errors." % \
12248                                         (icount, badcount))
12249                                 writemsg_level(errmsg, level=logging.ERROR, noiselevel=-1)
12250                         else:
12251                                 if icount > 0:
12252                                         out.einfo("Processed %d info files." % (icount,))
12253
12254
12255 def display_news_notification(root_config, myopts):
12256         target_root = root_config.root
12257         trees = root_config.trees
12258         settings = trees["vartree"].settings
12259         portdb = trees["porttree"].dbapi
12260         vardb = trees["vartree"].dbapi
12261         NEWS_PATH = os.path.join("metadata", "news")
12262         UNREAD_PATH = os.path.join(target_root, NEWS_LIB_PATH, "news")
12263         newsReaderDisplay = False
12264         update = "--pretend" not in myopts
12265
12266         for repo in portdb.getRepositories():
12267                 unreadItems = checkUpdatedNewsItems(
12268                         portdb, vardb, NEWS_PATH, UNREAD_PATH, repo, update=update)
12269                 if unreadItems:
12270                         if not newsReaderDisplay:
12271                                 newsReaderDisplay = True
12272                                 print
12273                         print colorize("WARN", " * IMPORTANT:"),
12274                         print "%s news items need reading for repository '%s'." % (unreadItems, repo)
12275                         
12276         
12277         if newsReaderDisplay:
12278                 print colorize("WARN", " *"),
12279                 print "Use " + colorize("GOOD", "eselect news") + " to read news items."
12280                 print
12281
12282 def display_preserved_libs(vardbapi):
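              # Show at most this many consumers per preserved library; any
              # additional consumers are summarized as a count.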
12283         MAX_DISPLAY = 3
12284
12285         # Ensure the registry is consistent with existing files.
12286         vardbapi.plib_registry.pruneNonExisting()
12287
12288         if vardbapi.plib_registry.hasEntries():
12289                 print
12290                 print colorize("WARN", "!!!") + " existing preserved libs:"
12291                 plibdata = vardbapi.plib_registry.getPreservedLibs()
12292                 linkmap = vardbapi.linkmap
12293                 consumer_map = {}
12294                 owners = {}
12295                 linkmap_broken = False
12296
12297                 try:
12298                         linkmap.rebuild()
12299                 except portage.exception.CommandNotFound, e:
12300                         writemsg_level("!!! Command Not Found: %s\n" % (e,),
12301                                 level=logging.ERROR, noiselevel=-1)
12302                         del e
12303                         linkmap_broken = True
12304                 else:
12305                         search_for_owners = set()
12306                         for cpv in plibdata:
12307                                 internal_plib_keys = set(linkmap._obj_key(f) \
12308                                         for f in plibdata[cpv])
12309                                 for f in plibdata[cpv]:
12310                                         if f in consumer_map:
12311                                                 continue
12312                                         consumers = []
12313                                         for c in linkmap.findConsumers(f):
12314                                                 # Filter out any consumers that are also preserved libs
12315                                                 # belonging to the same package as the provider.
12316                                                 if linkmap._obj_key(c) not in internal_plib_keys:
12317                                                         consumers.append(c)
12318                                         consumers.sort()
12319                                         consumer_map[f] = consumers
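                                              # Collect owners for one extra consumer so the "exactly one
                                              # more than MAX_DISPLAY" case below can name it directly.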
12320                                         search_for_owners.update(consumers[:MAX_DISPLAY+1])
12321
12322                         owners = vardbapi._owners.getFileOwnerMap(search_for_owners)
12323
12324                 for cpv in plibdata:
12325                         print colorize("WARN", ">>>") + " package: %s" % cpv
12326                         samefile_map = {}
12327                         for f in plibdata[cpv]:
12328                                 obj_key = linkmap._obj_key(f)
12329                                 alt_paths = samefile_map.get(obj_key)
12330                                 if alt_paths is None:
12331                                         alt_paths = set()
12332                                         samefile_map[obj_key] = alt_paths
12333                                 alt_paths.add(f)
12334
12335                         for alt_paths in samefile_map.itervalues():
12336                                 alt_paths = sorted(alt_paths)
12337                                 for p in alt_paths:
12338                                         print colorize("WARN", " * ") + " - %s" % (p,)
12339                                 f = alt_paths[0]
12340                                 consumers = consumer_map.get(f, [])
12341                                 for c in consumers[:MAX_DISPLAY]:
12342                                         print colorize("WARN", " * ") + "     used by %s (%s)" % \
12343                                                 (c, ", ".join(x.mycpv for x in owners.get(c, [])))
12344                                 if len(consumers) == MAX_DISPLAY + 1:
12345                                         print colorize("WARN", " * ") + "     used by %s (%s)" % \
12346                                                 (consumers[MAX_DISPLAY], ", ".join(x.mycpv \
12347                                                 for x in owners.get(consumers[MAX_DISPLAY], [])))
12348                                 elif len(consumers) > MAX_DISPLAY:
12349                                         print colorize("WARN", " * ") + "     used by %d other files" % (len(consumers) - MAX_DISPLAY)
12350                 print "Use " + colorize("GOOD", "emerge @preserved-rebuild") + " to rebuild packages using these libraries"
12351
12352
12353 def _flush_elog_mod_echo():
12354         """
12355         Dump the mod_echo output now so that our other
12356         notifications are shown last.
12357         @rtype: bool
12358         @returns: True if messages were shown, False otherwise.
12359         """
12360         messages_shown = False
12361         try:
12362                 from portage.elog import mod_echo
12363         except ImportError:
12364                 pass # happens during downgrade to a version without the module
12365         else:
12366                 messages_shown = bool(mod_echo._items)
12367                 mod_echo.finalize()
12368         return messages_shown
12369
12370 def post_emerge(root_config, myopts, mtimedb, retval):
12371         """
12372         Misc. things to run at the end of a merge session.
12373         
12374         Update Info Files
12375         Update Config Files
12376         Update News Items
12377         Commit mtimeDB
12378         Display preserved libs warnings
12379         Exit Emerge
12380
12381         @param root_config: The RootConfig of the target ROOT, which provides its package databases
12382         @type root_config: RootConfig
12383         @param mtimedb: The mtimeDB to store data needed across merge invocations
12384         @type mtimedb: MtimeDB class instance
12385         @param retval: Emerge's return value
12386         @type retval: Int
12387         @rtype: None
12388         @returns:
12389         1.  Calls sys.exit(retval)
12390         """
12391
12392         target_root = root_config.root
12393         trees = { target_root : root_config.trees }
12394         vardbapi = trees[target_root]["vartree"].dbapi
12395         settings = vardbapi.settings
12396         info_mtimes = mtimedb["info"]
12397
12398         # Load the most current variables from ${ROOT}/etc/profile.env
12399         settings.unlock()
12400         settings.reload()
12401         settings.regenerate()
12402         settings.lock()
12403
12404         config_protect = settings.get("CONFIG_PROTECT","").split()
12405         infodirs = settings.get("INFOPATH","").split(":") + \
12406                 settings.get("INFODIR","").split(":")
12407
12408         os.chdir("/")
12409
12410         if retval == os.EX_OK:
12411                 exit_msg = " *** exiting successfully."
12412         else:
12413                 exit_msg = " *** exiting unsuccessfully with status '%s'." % retval
12414         emergelog("notitles" not in settings.features, exit_msg)
12415
12416         _flush_elog_mod_echo()
12417
12418         counter_hash = settings.get("PORTAGE_COUNTER_HASH")
12419         if "--pretend" in myopts or (counter_hash is not None and \
12420                 counter_hash == vardbapi._counter_hash()):
12421                 display_news_notification(root_config, myopts)
12422                 # If vdb state has not changed then there's nothing else to do.
12423                 sys.exit(retval)
12424
12425         vdb_path = os.path.join(target_root, portage.VDB_PATH)
12426         portage.util.ensure_dirs(vdb_path)
12427         vdb_lock = None
12428         if os.access(vdb_path, os.W_OK) and "--pretend" not in myopts:
12429                 vdb_lock = portage.locks.lockdir(vdb_path)
12430
12431         if vdb_lock:
12432                 try:
12433                         if "noinfo" not in settings.features:
12434                                 chk_updated_info_files(target_root,
12435                                         infodirs, info_mtimes, retval)
12436                         mtimedb.commit()
12437                 finally:
12438                         if vdb_lock:
12439                                 portage.locks.unlockdir(vdb_lock)
12440
12441         chk_updated_cfg_files(target_root, config_protect)
12442         
12443         display_news_notification(root_config, myopts)
12444         if retval in (None, os.EX_OK) or "--pretend" not in myopts:
12445                 display_preserved_libs(vardbapi)
12446
12447         sys.exit(retval)
12448
12449
12450 def chk_updated_cfg_files(target_root, config_protect):
12451         if config_protect:
12452                 #number of directories with some protect files in them
12453                 procount=0
12454                 for x in config_protect:
12455                         x = os.path.join(target_root, x.lstrip(os.path.sep))
12456                         if not os.access(x, os.W_OK):
12457                                 # Avoid Permission denied errors generated
12458                                 # later by `find`.
12459                                 continue
12460                         try:
12461                                 mymode = os.lstat(x).st_mode
12462                         except OSError:
12463                                 continue
12464                         if stat.S_ISLNK(mymode):
12465                                 # We want to treat it like a directory if it
12466                                 # is a symlink to an existing directory.
12467                                 try:
12468                                         real_mode = os.stat(x).st_mode
12469                                         if stat.S_ISDIR(real_mode):
12470                                                 mymode = real_mode
12471                                 except OSError:
12472                                         pass
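                              # For a directory, search it recursively for ._cfg????_* files while
                              # pruning hidden subdirectories; for a single protected file, look
                              # only in its parent directory.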
12473                         if stat.S_ISDIR(mymode):
12474                                 mycommand = "find '%s' -name '.*' -type d -prune -o -name '._cfg????_*'" % x
12475                         else:
12476                                 mycommand = "find '%s' -maxdepth 1 -name '._cfg????_%s'" % \
12477                                         os.path.split(x.rstrip(os.path.sep))
12478                         mycommand += " ! -name '.*~' ! -iname '.*.bak' -print0"
12479                         a = commands.getstatusoutput(mycommand)
12480                         if a[0] != 0:
12481                                 sys.stderr.write(" %s error scanning '%s': " % (bad("*"), x))
12482                                 sys.stderr.flush()
12483                                 # Show the error message alone, sending stdout to /dev/null.
12484                                 os.system(mycommand + " 1>/dev/null")
12485                         else:
12486                                 files = a[1].split('\0')
12487                                 # split always produces an empty string as the last element
12488                                 if files and not files[-1]:
12489                                         del files[-1]
12490                                 if files:
12491                                         procount += 1
12492                                         print "\n"+colorize("WARN", " * IMPORTANT:"),
12493                                         if stat.S_ISDIR(mymode):
12494                                                  print "%d config files in '%s' need updating." % \
12495                                                         (len(files), x)
12496                                         else:
12497                                                  print "config file '%s' needs updating." % x
12498
12499                 if procount:
12500                         print " "+yellow("*")+" See the "+colorize("INFORM","CONFIGURATION FILES")+ \
12501                                 " section of the " + bold("emerge")
12502                         print " "+yellow("*")+" man page to learn how to update config files."
12503
12504 def checkUpdatedNewsItems(portdb, vardb, NEWS_PATH, UNREAD_PATH, repo_id,
12505         update=False):
12506         """
12507         Examines news items in repodir + '/' + NEWS_PATH and attempts to find unread items.
12508         Returns the number of unread (yet relevant) items.
12509         
12510         @param portdb: a portage tree database
12511         @type portdb: portdbapi
12512         @param vardb: an installed package database
12513         @type vardb: vardbapi
12514         @param NEWS_PATH: path, relative to a repository, where news items are stored
12515         @type NEWS_PATH: String
12516         @param UNREAD_PATH: path to the directory that tracks unread news items
12517         @type UNREAD_PATH: String
12518         @param repo_id: name of the repository to check
12519         @type repo_id: String
12520         @rtype: Integer
12521         @returns:
12522         1.  The number of unread but relevant news items.
12523         
12524         """
12525         from portage.news import NewsManager
12526         manager = NewsManager(portdb, vardb, NEWS_PATH, UNREAD_PATH)
12527         return manager.getUnreadItems( repo_id, update=update )
12528
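      # Prepend the given category to an atom that has none, preserving any leading
      # operator characters; returns None if the atom contains no word characters.
      # Illustrative example (not from the original sources):
      #   insert_category_into_atom(">=foo-1.0", "sys-apps") -> ">=sys-apps/foo-1.0"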
12529 def insert_category_into_atom(atom, category):
12530         alphanum = re.search(r'\w', atom)
12531         if alphanum:
12532                 ret = atom[:alphanum.start()] + "%s/" % category + \
12533                         atom[alphanum.start():]
12534         else:
12535                 ret = None
12536         return ret
12537
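      # Validate a package atom; when no category is given, a dummy "cat/" prefix is
      # inserted temporarily so that portage.isvalidatom() can check the remainder.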
12538 def is_valid_package_atom(x):
12539         if "/" not in x:
12540                 alphanum = re.search(r'\w', x)
12541                 if alphanum:
12542                         x = x[:alphanum.start()] + "cat/" + x[alphanum.start():]
12543         return portage.isvalidatom(x)
12544
12545 def show_blocker_docs_link():
12546         print
12547         print "For more information about " + bad("Blocked Packages") + ", please refer to the following"
12548         print "section of the Gentoo Linux x86 Handbook (architecture is irrelevant):"
12549         print
12550         print "http://www.gentoo.org/doc/en/handbook/handbook-x86.xml?full=1#blocked"
12551         print
12552
12553 def show_mask_docs():
12554         print "For more information, see the MASKED PACKAGES section in the emerge"
12555         print "man page or refer to the Gentoo Handbook."
12556
12557 def action_sync(settings, trees, mtimedb, myopts, myaction):
12558         xterm_titles = "notitles" not in settings.features
12559         emergelog(xterm_titles, " === sync")
12560         myportdir = settings.get("PORTDIR", None)
12561         out = portage.output.EOutput()
12562         if not myportdir:
12563                 sys.stderr.write("!!! PORTDIR is undefined.  Is /etc/make.globals missing?\n")
12564                 sys.exit(1)
12565         if myportdir[-1]=="/":
12566                 myportdir=myportdir[:-1]
12567         try:
12568                 st = os.stat(myportdir)
12569         except OSError:
12570                 st = None
12571         if st is None:
12572                 print ">>>",myportdir,"not found, creating it."
12573                 os.makedirs(myportdir,0755)
12574                 st = os.stat(myportdir)
12575
12576         spawn_kwargs = {}
12577         spawn_kwargs["env"] = settings.environ()
12578         if 'usersync' in settings.features and \
12579                 portage.data.secpass >= 2 and \
12580                 (st.st_uid != os.getuid() and st.st_mode & 0700 or \
12581                 st.st_gid != os.getgid() and st.st_mode & 0070):
12582                 try:
12583                         homedir = pwd.getpwuid(st.st_uid).pw_dir
12584                 except KeyError:
12585                         pass
12586                 else:
12587                         # Drop privileges when syncing, in order to match
12588                         # existing uid/gid settings.
12589                         spawn_kwargs["uid"]    = st.st_uid
12590                         spawn_kwargs["gid"]    = st.st_gid
12591                         spawn_kwargs["groups"] = [st.st_gid]
12592                         spawn_kwargs["env"]["HOME"] = homedir
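                              # Start from a group-friendly umask and mask out group write
                              # only if the existing tree itself is not group-writable.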
12593                         umask = 0002
12594                         if not st.st_mode & 0020:
12595                                 umask = umask | 0020
12596                         spawn_kwargs["umask"] = umask
12597
12598         syncuri = settings.get("SYNC", "").strip()
12599         if not syncuri:
12600                 writemsg_level("!!! SYNC is undefined. Is /etc/make.globals missing?\n",
12601                         noiselevel=-1, level=logging.ERROR)
12602                 return 1
12603
12604         vcs_dirs = frozenset([".git", ".svn", "CVS", ".hg"])
12605         vcs_dirs = vcs_dirs.intersection(os.listdir(myportdir))
12606
12607         os.umask(0022)
12608         dosyncuri = syncuri
12609         updatecache_flg = False
12610         if myaction == "metadata":
12611                 print "skipping sync"
12612                 updatecache_flg = True
12613         elif ".git" in vcs_dirs:
12614                 # Update existing git repository, and ignore the syncuri. We are
12615                 # going to trust the user and assume that the user is in the branch
12616                 # that he/she wants updated. We'll let the user manage branches with
12617                 # git directly.
12618                 if portage.process.find_binary("git") is None:
12619                         msg = ["Command not found: git",
12620                         "Type \"emerge dev-util/git\" to enable git support."]
12621                         for l in msg:
12622                                 writemsg_level("!!! %s\n" % l,
12623                                         level=logging.ERROR, noiselevel=-1)
12624                         return 1
12625                 msg = ">>> Starting git pull in %s..." % myportdir
12626                 emergelog(xterm_titles, msg )
12627                 writemsg_level(msg + "\n")
12628                 exitcode = portage.process.spawn_bash("cd %s ; git pull" % \
12629                         (portage._shell_quote(myportdir),), **spawn_kwargs)
12630                 if exitcode != os.EX_OK:
12631                         msg = "!!! git pull error in %s." % myportdir
12632                         emergelog(xterm_titles, msg)
12633                         writemsg_level(msg + "\n", level=logging.ERROR, noiselevel=-1)
12634                         return exitcode
12635                 msg = ">>> Git pull in %s successful" % myportdir
12636                 emergelog(xterm_titles, msg)
12637                 writemsg_level(msg + "\n")
12638                 exitcode = git_sync_timestamps(settings, myportdir)
12639                 if exitcode == os.EX_OK:
12640                         updatecache_flg = True
12641         elif syncuri[:8]=="rsync://":
12642                 for vcs_dir in vcs_dirs:
12643                         writemsg_level(("!!! %s appears to be under revision " + \
12644                                 "control (contains %s).\n!!! Aborting rsync sync.\n") % \
12645                                 (myportdir, vcs_dir), level=logging.ERROR, noiselevel=-1)
12646                         return 1
12647                 if not os.path.exists("/usr/bin/rsync"):
12648                         print "!!! /usr/bin/rsync does not exist, so rsync support is disabled."
12649                         print "!!! Type \"emerge net-misc/rsync\" to enable rsync support."
12650                         sys.exit(1)
12651                 mytimeout=180
12652
12653                 rsync_opts = []
12654                 if settings["PORTAGE_RSYNC_OPTS"] == "":
12655                         portage.writemsg("PORTAGE_RSYNC_OPTS empty or unset, using hardcoded defaults\n")
12656                         rsync_opts.extend([
12657                                 "--recursive",    # Recurse directories
12658                                 "--links",        # Consider symlinks
12659                                 "--safe-links",   # Ignore links outside of tree
12660                                 "--perms",        # Preserve permissions
12661                                 "--times",        # Preserve mod times
12662                                 "--compress",     # Compress the data transmitted
12663                                 "--force",        # Force deletion on non-empty dirs
12664                                 "--whole-file",   # Don't do block transfers, only entire files
12665                                 "--delete",       # Delete files that aren't in the master tree
12666                                 "--stats",        # Show final statistics about what was transferred
12667                                 "--timeout="+str(mytimeout), # IO timeout if not done in X seconds
12668                                 "--exclude=/distfiles",   # Exclude distfiles from consideration
12669                                 "--exclude=/local",       # Exclude local     from consideration
12670                                 "--exclude=/packages",    # Exclude packages  from consideration
12671                         ])
12672
12673                 else:
12674                         # The below validation is not needed when using the above hardcoded
12675                         # defaults.
12676
12677                         portage.writemsg("Using PORTAGE_RSYNC_OPTS instead of hardcoded defaults\n", 1)
12678                         rsync_opts.extend(
12679                                 shlex.split(settings.get("PORTAGE_RSYNC_OPTS","")))
12680                         for opt in ("--recursive", "--times"):
12681                                 if opt not in rsync_opts:
12682                                         portage.writemsg(yellow("WARNING:") + " adding required option " + \
12683                                         "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
12684                                         rsync_opts.append(opt)
12685         
12686                         for exclude in ("distfiles", "local", "packages"):
12687                                 opt = "--exclude=/%s" % exclude
12688                                 if opt not in rsync_opts:
12689                                         portage.writemsg(yellow("WARNING:") + \
12690                                         " adding required option %s not included in "  % opt + \
12691                                         "PORTAGE_RSYNC_OPTS (can be overridden with --exclude='!')\n")
12692                                         rsync_opts.append(opt)
12693         
12694                         if syncuri.rstrip("/").endswith(".gentoo.org/gentoo-portage"):
12695                                 def rsync_opt_startswith(opt_prefix):
12696                                         for x in rsync_opts:
12697                                                 if x.startswith(opt_prefix):
12698                                                         return True
12699                                         return False
12700
12701                                 if not rsync_opt_startswith("--timeout="):
12702                                         rsync_opts.append("--timeout=%d" % mytimeout)
12703
12704                                 for opt in ("--compress", "--whole-file"):
12705                                         if opt not in rsync_opts:
12706                                                 portage.writemsg(yellow("WARNING:") + " adding required option " + \
12707                                                 "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
12708                                                 rsync_opts.append(opt)
12709
12710                 if "--quiet" in myopts:
12711                         rsync_opts.append("--quiet")    # Shut up a lot
12712                 else:
12713                         rsync_opts.append("--verbose")  # Print filelist
12714
12715                 if "--verbose" in myopts:
12716                         rsync_opts.append("--progress")  # Progress meter for each file
12717
12718                 if "--debug" in myopts:
12719                         rsync_opts.append("--checksum") # Force checksum on all files
12720
12721                 # Real local timestamp file.
12722                 servertimestampfile = os.path.join(
12723                         myportdir, "metadata", "timestamp.chk")
12724
12725                 content = portage.util.grabfile(servertimestampfile)
12726                 mytimestamp = 0
12727                 if content:
12728                         try:
12729                                 mytimestamp = time.mktime(time.strptime(content[0],
12730                                         "%a, %d %b %Y %H:%M:%S +0000"))
12731                         except (OverflowError, ValueError):
12732                                 pass
12733                 del content
12734
12735                 try:
12736                         rsync_initial_timeout = \
12737                                 int(settings.get("PORTAGE_RSYNC_INITIAL_TIMEOUT", "15"))
12738                 except ValueError:
12739                         rsync_initial_timeout = 15
12740
12741                 try:
12742                         maxretries=int(settings["PORTAGE_RSYNC_RETRIES"])
12743                 except SystemExit, e:
12744                         raise # Needed else can't exit
12745                 except:
12746                         maxretries=3 #default number of retries
12747
12748                 retries=0
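                      # Split the rsync URI into its optional user@, hostname and
                      # optional :port components.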
12749                 user_name, hostname, port = re.split(
12750                         "rsync://([^:/]+@)?([^:/]*)(:[0-9]+)?", syncuri, maxsplit=3)[1:4]
12751                 if port is None:
12752                         port=""
12753                 if user_name is None:
12754                         user_name=""
12755                 updatecache_flg=True
12756                 all_rsync_opts = set(rsync_opts)
12757                 extra_rsync_opts = shlex.split(
12758                         settings.get("PORTAGE_RSYNC_EXTRA_OPTS",""))
12759                 all_rsync_opts.update(extra_rsync_opts)
12760                 family = socket.AF_INET
12761                 if "-4" in all_rsync_opts or "--ipv4" in all_rsync_opts:
12762                         family = socket.AF_INET
12763                 elif socket.has_ipv6 and \
12764                         ("-6" in all_rsync_opts or "--ipv6" in all_rsync_opts):
12765                         family = socket.AF_INET6
12766                 ips=[]
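                      # Sentinel exit codes for the retry loop below; negative values
                      # cannot collide with real rsync exit codes.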
12767                 SERVER_OUT_OF_DATE = -1
12768                 EXCEEDED_MAX_RETRIES = -2
12769                 while (1):
12770                         if ips:
12771                                 del ips[0]
12772                         if ips==[]:
12773                                 try:
12774                                         for addrinfo in socket.getaddrinfo(
12775                                                 hostname, None, family, socket.SOCK_STREAM):
12776                                                 if socket.has_ipv6 and addrinfo[0] == socket.AF_INET6:
12777                                                         # IPv6 addresses need to be enclosed in square brackets
12778                                                         ips.append("[%s]" % addrinfo[4][0])
12779                                                 else:
12780                                                         ips.append(addrinfo[4][0])
12781                                         from random import shuffle
12782                                         shuffle(ips)
12783                                 except SystemExit, e:
12784                                         raise # Needed else can't exit
12785                                 except Exception, e:
12786                                         print "Notice:",str(e)
12787                                         dosyncuri=syncuri
12788
12789                         if ips:
12790                                 try:
12791                                         dosyncuri = syncuri.replace(
12792                                                 "//" + user_name + hostname + port + "/",
12793                                                 "//" + user_name + ips[0] + port + "/", 1)
12794                                 except SystemExit, e:
12795                                         raise # Needed else can't exit
12796                                 except Exception, e:
12797                                         print "Notice:",str(e)
12798                                         dosyncuri=syncuri
12799
12800                         if (retries==0):
12801                                 if "--ask" in myopts:
12802                                         if userquery("Do you want to sync your Portage tree with the mirror at\n" + blue(dosyncuri) + bold("?"))=="No":
12803                                                 print
12804                                                 print "Quitting."
12805                                                 print
12806                                                 sys.exit(0)
12807                                 emergelog(xterm_titles, ">>> Starting rsync with " + dosyncuri)
12808                                 if "--quiet" not in myopts:
12809                                         print ">>> Starting rsync with "+dosyncuri+"..."
12810                         else:
12811                                 emergelog(xterm_titles,
12812                                         ">>> Starting retry %d of %d with %s" % \
12813                                                 (retries,maxretries,dosyncuri))
12814                                 print "\n\n>>> Starting retry %d of %d with %s" % (retries,maxretries,dosyncuri)
12815
12816                         if mytimestamp != 0 and "--quiet" not in myopts:
12817                                 print ">>> Checking server timestamp ..."
12818
12819                         rsynccommand = ["/usr/bin/rsync"] + rsync_opts + extra_rsync_opts
12820
12821                         if "--debug" in myopts:
12822                                 print rsynccommand
12823
12824                         exitcode = os.EX_OK
12825                         servertimestamp = 0
12826                         # Even if there's no timestamp available locally, fetch the
12827                         # timestamp anyway as an initial probe to verify that the server is
12828                         # responsive.  This protects us from hanging indefinitely on a
12829                         # connection attempt to an unresponsive server which rsync's
12830                         # --timeout option does not prevent.
12831                         if True:
12832                                 # Temporary file for remote server timestamp comparison.
12833                                 from tempfile import mkstemp
12834                                 fd, tmpservertimestampfile = mkstemp()
12835                                 os.close(fd)
12836                                 mycommand = rsynccommand[:]
12837                                 mycommand.append(dosyncuri.rstrip("/") + \
12838                                         "/metadata/timestamp.chk")
12839                                 mycommand.append(tmpservertimestampfile)
12840                                 content = None
12841                                 mypids = []
12842                                 try:
12843                                         def timeout_handler(signum, frame):
12844                                                 raise portage.exception.PortageException("timed out")
12845                                         signal.signal(signal.SIGALRM, timeout_handler)
12846                                         # Timeout here in case the server is unresponsive.  The
12847                                         # --timeout rsync option doesn't apply to the initial
12848                                         # connection attempt.
12849                                         if rsync_initial_timeout:
12850                                                 signal.alarm(rsync_initial_timeout)
12851                                         try:
12852                                                 mypids.extend(portage.process.spawn(
12853                                                         mycommand, env=settings.environ(), returnpid=True))
12854                                                 exitcode = os.waitpid(mypids[0], 0)[1]
12855                                                 content = portage.grabfile(tmpservertimestampfile)
12856                                         finally:
12857                                                 if rsync_initial_timeout:
12858                                                         signal.alarm(0)
12859                                                 try:
12860                                                         os.unlink(tmpservertimestampfile)
12861                                                 except OSError:
12862                                                         pass
12863                                 except portage.exception.PortageException, e:
12864                                         # timed out
12865                                         print e
12866                                         del e
12867                                         if mypids and os.waitpid(mypids[0], os.WNOHANG) == (0,0):
12868                                                 os.kill(mypids[0], signal.SIGTERM)
12869                                                 os.waitpid(mypids[0], 0)
12870                                         # This is the same code rsync uses for timeout.
12871                                         exitcode = 30
12872                                 else:
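                                              # os.waitpid() returned a 16-bit status (terminating signal
                                              # in the low byte, exit status in the high byte); reduce it
                                              # to a single exit code here.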
12873                                         if exitcode != os.EX_OK:
12874                                                 if exitcode & 0xff:
12875                                                         exitcode = (exitcode & 0xff) << 8
12876                                                 else:
12877                                                         exitcode = exitcode >> 8
12878                                 if mypids:
12879                                         portage.process.spawned_pids.remove(mypids[0])
12880                                 if content:
12881                                         try:
12882                                                 servertimestamp = time.mktime(time.strptime(
12883                                                         content[0], "%a, %d %b %Y %H:%M:%S +0000"))
12884                                         except (OverflowError, ValueError):
12885                                                 pass
12886                                 del mycommand, mypids, content
12887                         if exitcode == os.EX_OK:
12888                                 if (servertimestamp != 0) and (servertimestamp == mytimestamp):
12889                                         emergelog(xterm_titles,
12890                                                 ">>> Cancelling sync -- Already current.")
12891                                         print
12892                                         print ">>>"
12893                                         print ">>> Timestamps on the server and in the local repository are the same."
12894                                         print ">>> Cancelling all further sync action. You are already up to date."
12895                                         print ">>>"
12896                                         print ">>> In order to force sync, remove '%s'." % servertimestampfile
12897                                         print ">>>"
12898                                         print
12899                                         sys.exit(0)
12900                                 elif (servertimestamp != 0) and (servertimestamp < mytimestamp):
12901                                         emergelog(xterm_titles,
12902                                                 ">>> Server out of date: %s" % dosyncuri)
12903                                         print
12904                                         print ">>>"
12905                                         print ">>> SERVER OUT OF DATE: %s" % dosyncuri
12906                                         print ">>>"
12907                                         print ">>> In order to force sync, remove '%s'." % servertimestampfile
12908                                         print ">>>"
12909                                         print
12910                                         exitcode = SERVER_OUT_OF_DATE
12911                                 elif (servertimestamp == 0) or (servertimestamp > mytimestamp):
12912                                         # actual sync
12913                                         mycommand = rsynccommand + [dosyncuri+"/", myportdir]
12914                                         exitcode = portage.process.spawn(mycommand, **spawn_kwargs)
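                                                      # Stop retrying for these rsync exit codes; any other
                                                      # code falls through to the retry logic below.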
12915                                         if exitcode in [0,1,3,4,11,14,20,21]:
12916                                                 break
12917                         elif exitcode in [1,3,4,11,14,20,21]:
12918                                 break
12919                         else:
12920                                 # Code 2 indicates protocol incompatibility, which is expected
12921                                 # for servers with protocol < 29 that don't support
12922                                 # --prune-empty-directories.  Retry for a server that supports
12923                                 # at least rsync protocol version 29 (>=rsync-2.6.4).
12924                                 pass
12925
12926                         retries=retries+1
12927
12928                         if retries<=maxretries:
12929                                 print ">>> Retrying..."
12930                                 time.sleep(11)
12931                         else:
12932                                 # over retries
12933                                 # exit loop
12934                                 updatecache_flg=False
12935                                 exitcode = EXCEEDED_MAX_RETRIES
12936                                 break
12937
12938                 if (exitcode==0):
12939                         emergelog(xterm_titles, "=== Sync completed with %s" % dosyncuri)
12940                 elif exitcode == SERVER_OUT_OF_DATE:
12941                         sys.exit(1)
12942                 elif exitcode == EXCEEDED_MAX_RETRIES:
12943                         sys.stderr.write(
12944                                 ">>> Exceeded PORTAGE_RSYNC_RETRIES: %s\n" % maxretries)
12945                         sys.exit(1)
12946                 elif (exitcode>0):
12947                         msg = []
12948                         if exitcode==1:
12949                                 msg.append("Rsync has reported that there is a syntax error. Please ensure")
12950                                 msg.append("that your SYNC statement is proper.")
12951                                 msg.append("SYNC=" + settings["SYNC"])
12952                         elif exitcode==11:
12953                                 msg.append("Rsync has reported that there is a File IO error. Normally")
12954                                 msg.append("this means your disk is full, but can be caused by corruption")
12955                                 msg.append("on the filesystem that contains PORTDIR. Please investigate")
12956                                 msg.append("and try again after the problem has been fixed.")
12957                                 msg.append("PORTDIR=" + settings["PORTDIR"])
12958                         elif exitcode==20:
12959                                 msg.append("Rsync was killed before it finished.")
12960                         else:
12961                                 msg.append("Rsync has not successfully finished. It is recommended that you keep")
12962                                 msg.append("trying or that you use the 'emerge-webrsync' option if you are unable")
12963                                 msg.append("to use rsync due to firewall or other restrictions. This should be a")
12964                                 msg.append("temporary problem unless complications exist with your network")
12965                                 msg.append("(and possibly your system's filesystem) configuration.")
12966                         for line in msg:
12967                                 out.eerror(line)
12968                         sys.exit(exitcode)
12969         elif syncuri[:6]=="cvs://":
12970                 if not os.path.exists("/usr/bin/cvs"):
12971                         print "!!! /usr/bin/cvs does not exist, so CVS support is disabled."
12972                         print "!!! Type \"emerge dev-util/cvs\" to enable CVS support."
12973                         sys.exit(1)
12974                 cvsroot=syncuri[6:]
12975                 cvsdir=os.path.dirname(myportdir)
12976                 if not os.path.exists(myportdir+"/CVS"):
12977                         #initial checkout
12978                         print ">>> Starting initial cvs checkout with "+syncuri+"..."
12979                         if os.path.exists(cvsdir+"/gentoo-x86"):
12980                                 print "!!! existing",cvsdir+"/gentoo-x86 directory; exiting."
12981                                 sys.exit(1)
12982                         try:
12983                                 os.rmdir(myportdir)
12984                         except OSError, e:
12985                                 if e.errno != errno.ENOENT:
12986                                         sys.stderr.write(
12987                                                 "!!! existing '%s' directory; exiting.\n" % myportdir)
12988                                         sys.exit(1)
12989                                 del e
12990                         if portage.spawn("cd "+cvsdir+"; cvs -z0 -d "+cvsroot+" co -P gentoo-x86",settings,free=1):
12991                                 print "!!! cvs checkout error; exiting."
12992                                 sys.exit(1)
12993                         os.rename(os.path.join(cvsdir, "gentoo-x86"), myportdir)
12994                 else:
12995                         #cvs update
12996                         print ">>> Starting cvs update with "+syncuri+"..."
12997                         retval = portage.process.spawn_bash(
12998                                 "cd %s; cvs -z0 -q update -dP" % \
12999                                 (portage._shell_quote(myportdir),), **spawn_kwargs)
13000                         if retval != os.EX_OK:
13001                                 sys.exit(retval)
13002                 dosyncuri = syncuri
13003         else:
13004                 writemsg_level("!!! Unrecognized protocol: SYNC='%s'\n" % (syncuri,),
13005                         noiselevel=-1, level=logging.ERROR)
13006                 return 1
13007
13008         if updatecache_flg and  \
13009                 myaction != "metadata" and \
13010                 "metadata-transfer" not in settings.features:
13011                 updatecache_flg = False
13012
13013         # Reload the whole config from scratch.
13014         settings, trees, mtimedb = load_emerge_config(trees=trees)
13015         root_config = trees[settings["ROOT"]]["root_config"]
13016         portdb = trees[settings["ROOT"]]["porttree"].dbapi
13017
13018         if os.path.exists(myportdir+"/metadata/cache") and updatecache_flg:
13019                 action_metadata(settings, portdb, myopts)
13020
13021         if portage._global_updates(trees, mtimedb["updates"]):
13022                 mtimedb.commit()
13023                 # Reload the whole config from scratch.
13024                 settings, trees, mtimedb = load_emerge_config(trees=trees)
13025                 portdb = trees[settings["ROOT"]]["porttree"].dbapi
13026                 root_config = trees[settings["ROOT"]]["root_config"]
13027
13028         mybestpv = portdb.xmatch("bestmatch-visible",
13029                 portage.const.PORTAGE_PACKAGE_ATOM)
13030         mypvs = portage.best(
13031                 trees[settings["ROOT"]]["vartree"].dbapi.match(
13032                 portage.const.PORTAGE_PACKAGE_ATOM))
13033
13034         chk_updated_cfg_files("/", settings.get("CONFIG_PROTECT","").split())
13035
13036         if myaction != "metadata":
13037                 if os.access(portage.USER_CONFIG_PATH + "/bin/post_sync", os.X_OK):
13038                         retval = portage.process.spawn(
13039                                 [os.path.join(portage.USER_CONFIG_PATH, "bin", "post_sync"),
13040                                 dosyncuri], env=settings.environ())
13041                         if retval != os.EX_OK:
13042                                 print red(" * ")+bold("Failed to spawn "+ portage.USER_CONFIG_PATH + "/bin/post_sync")
13043
13044         if (mybestpv != mypvs) and "--quiet" not in myopts:
13045                 print
13046                 print red(" * ")+bold("An update to portage is available.")+" It is _highly_ recommended"
13047                 print red(" * ")+"that you update portage now, before any other packages are updated."
13048                 print
13049                 print red(" * ")+"To update portage, run 'emerge portage' now."
13050                 print
13051         
13052         display_news_notification(root_config, myopts)
13053         return os.EX_OK
13054
13055 def git_sync_timestamps(settings, portdir):
13056         """
13057         Since git doesn't preserve timestamps, synchronize timestamps between
13058         metadata cache entries and the corresponding ebuilds/eclasses. Assume the cache has the correct timestamp
13059         for a given file as long as the file in the working tree is not modified
13060         (relative to HEAD).
13061         """
13062         cache_dir = os.path.join(portdir, "metadata", "cache")
13063         if not os.path.isdir(cache_dir):
13064                 return os.EX_OK
13065         writemsg_level(">>> Synchronizing timestamps...\n")
13066
13067         from portage.cache.cache_errors import CacheError
13068         try:
13069                 cache_db = settings.load_best_module("portdbapi.metadbmodule")(
13070                         portdir, "metadata/cache", portage.auxdbkeys[:], readonly=True)
13071         except CacheError, e:
13072                 writemsg_level("!!! Unable to instantiate cache: %s\n" % (e,),
13073                         level=logging.ERROR, noiselevel=-1)
13074                 return 1
13075
13076         ec_dir = os.path.join(portdir, "eclass")
13077         try:
13078                 ec_names = set(f[:-7] for f in os.listdir(ec_dir) \
13079                         if f.endswith(".eclass"))
13080         except OSError, e:
13081                 writemsg_level("!!! Unable to list eclasses: %s\n" % (e,),
13082                         level=logging.ERROR, noiselevel=-1)
13083                 return 1
13084
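              # Ask git which tracked files are modified relative to HEAD; cached
              # timestamps for those files cannot be trusted.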
13085         args = [portage.const.BASH_BINARY, "-c",
13086                 "cd %s && git diff-index --name-only --diff-filter=M HEAD" % \
13087                 portage._shell_quote(portdir)]
13088         import subprocess
13089         proc = subprocess.Popen(args, stdout=subprocess.PIPE)
13090         modified_files = set(l.rstrip("\n") for l in proc.stdout)
13091         rval = proc.wait()
13092         if rval != os.EX_OK:
13093                 return rval
13094
13095         modified_eclasses = set(ec for ec in ec_names \
13096                 if os.path.join("eclass", ec + ".eclass") in modified_files)
13097
13098         updated_ec_mtimes = {}
13099
13100         for cpv in cache_db:
13101                 cpv_split = portage.catpkgsplit(cpv)
13102                 if cpv_split is None:
13103                         writemsg_level("!!! Invalid cache entry: %s\n" % (cpv,),
13104                                 level=logging.ERROR, noiselevel=-1)
13105                         continue
13106
13107                 cat, pn, ver, rev = cpv_split
13108                 cat, pf = portage.catsplit(cpv)
13109                 relative_eb_path = os.path.join(cat, pn, pf + ".ebuild")
13110                 if relative_eb_path in modified_files:
13111                         continue
13112
13113                 try:
13114                         cache_entry = cache_db[cpv]
13115                         eb_mtime = cache_entry.get("_mtime_")
13116                         ec_mtimes = cache_entry.get("_eclasses_")
13117                 except KeyError:
13118                         writemsg_level("!!! Missing cache entry: %s\n" % (cpv,),
13119                                 level=logging.ERROR, noiselevel=-1)
13120                         continue
13121                 except CacheError, e:
13122                         writemsg_level("!!! Unable to access cache entry: %s %s\n" % \
13123                                 (cpv, e), level=logging.ERROR, noiselevel=-1)
13124                         continue
13125
13126                 if eb_mtime is None:
13127                         writemsg_level("!!! Missing ebuild mtime: %s\n" % (cpv,),
13128                                 level=logging.ERROR, noiselevel=-1)
13129                         continue
13130
13131                 try:
13132                         eb_mtime = long(eb_mtime)
13133                 except ValueError:
13134                         writemsg_level("!!! Invalid ebuild mtime: %s %s\n" % \
13135                                 (cpv, eb_mtime), level=logging.ERROR, noiselevel=-1)
13136                         continue
13137
13138                 if ec_mtimes is None:
13139                         writemsg_level("!!! Missing eclass mtimes: %s\n" % (cpv,),
13140                                 level=logging.ERROR, noiselevel=-1)
13141                         continue
13142
13143                 if modified_eclasses.intersection(ec_mtimes):
13144                         continue
13145
13146                 missing_eclasses = set(ec_mtimes).difference(ec_names)
13147                 if missing_eclasses:
13148                         writemsg_level("!!! Non-existent eclass(es): %s %s\n" % \
13149                                 (cpv, sorted(missing_eclasses)), level=logging.ERROR,
13150                                 noiselevel=-1)
13151                         continue
13152
13153                 eb_path = os.path.join(portdir, relative_eb_path)
13154                 try:
13155                         current_eb_mtime = long(os.stat(eb_path).st_mtime)
13156                 except OSError:
13157                         writemsg_level("!!! Missing ebuild: %s\n" % \
13158                                 (cpv,), level=logging.ERROR, noiselevel=-1)
13159                         continue
13160
13161                 inconsistent = False
13162                 for ec, (ec_path, ec_mtime) in ec_mtimes.iteritems():
13163                         updated_mtime = updated_ec_mtimes.get(ec)
13164                         if updated_mtime is not None and updated_mtime != ec_mtime:
13165                                 writemsg_level("!!! Inconsistent eclass mtime: %s %s\n" % \
13166                                         (cpv, ec), level=logging.ERROR, noiselevel=-1)
13167                                 inconsistent = True
13168                                 break
13169
13170                 if inconsistent:
13171                         continue
13172
13173                 if current_eb_mtime != eb_mtime:
13174                         os.utime(eb_path, (eb_mtime, eb_mtime))
13175
13176                 for ec, (ec_path, ec_mtime) in ec_mtimes.iteritems():
13177                         if ec in updated_ec_mtimes:
13178                                 continue
13179                         ec_path = os.path.join(ec_dir, ec + ".eclass")
13180                         current_mtime = long(os.stat(ec_path).st_mtime)
13181                         if current_mtime != ec_mtime:
13182                                 os.utime(ec_path, (ec_mtime, ec_mtime))
13183                         updated_ec_mtimes[ec] = ec_mtime
13184
13185         return os.EX_OK
13186
13187 def action_metadata(settings, portdb, myopts):
13188         portage.writemsg_stdout("\n>>> Updating Portage cache:      ")
13189         old_umask = os.umask(0002)
13190         cachedir = os.path.normpath(settings.depcachedir)
13191         if cachedir in ["/",    "/bin", "/dev",  "/etc",  "/home",
13192                                         "/lib", "/opt", "/proc", "/root", "/sbin",
13193                                         "/sys", "/tmp", "/usr",  "/var"]:
13194                 print >> sys.stderr, "!!! PORTAGE_DEPCACHEDIR IS SET TO A PRIMARY " + \
13195                         "ROOT DIRECTORY ON YOUR SYSTEM."
13196                 print >> sys.stderr, \
13197                         "!!! This is ALMOST CERTAINLY NOT what you want: '%s'" % cachedir
13198                 sys.exit(73)
13199         if not os.path.exists(cachedir):
13200                 os.mkdir(cachedir)
13201
13202         ec = portage.eclass_cache.cache(portdb.porttree_root)
13203         myportdir = os.path.realpath(settings["PORTDIR"])
13204         cm = settings.load_best_module("portdbapi.metadbmodule")(
13205                 myportdir, "metadata/cache", portage.auxdbkeys[:], readonly=True)
13206
13207         from portage.cache import util
13208
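              # Progress reporter used when --quiet is not in effect: yields every cpv
              # while printing a rough percentage of progress through the cache mirror.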
13209         class percentage_noise_maker(util.quiet_mirroring):
13210                 def __init__(self, dbapi):
13211                         self.dbapi = dbapi
13212                         self.cp_all = dbapi.cp_all()
13213                         l = len(self.cp_all)
13214                         self.call_update_min = 100000000
13215                         self.min_cp_all = l/100.0
13216                         self.count = 1
13217                         self.pstr = ''
13218
13219                 def __iter__(self):
13220                         for x in self.cp_all:
13221                                 self.count += 1
13222                                 if self.count > self.min_cp_all:
13223                                         self.call_update_min = 0
13224                                         self.count = 0
13225                                 for y in self.dbapi.cp_list(x):
13226                                         yield y
13227                         self.call_update_min = 0
13228
13229                 def update(self, *arg):
13230                         try: self.pstr = int(self.pstr) + 1
13231                         except ValueError: self.pstr = 1
13232                         sys.stdout.write("%s%i%%" % \
13233                                 ("\b" * (len(str(self.pstr))+1), self.pstr))
13234                         sys.stdout.flush()
13235                         self.call_update_min = 10000000
13236
13237                 def finish(self, *arg):
13238                         sys.stdout.write("\b\b\b\b100%\n")
13239                         sys.stdout.flush()
13240
13241         if "--quiet" in myopts:
13242                 def quicky_cpv_generator(cp_all_list):
13243                         for x in cp_all_list:
13244                                 for y in portdb.cp_list(x):
13245                                         yield y
13246                 source = quicky_cpv_generator(portdb.cp_all())
13247                 noise_maker = portage.cache.util.quiet_mirroring()
13248         else:
13249                 noise_maker = source = percentage_noise_maker(portdb)
13250         portage.cache.util.mirror_cache(source, cm, portdb.auxdb[myportdir],
13251                 eclass_cache=ec, verbose_instance=noise_maker)
13252
13253         sys.stdout.flush()
13254         os.umask(old_umask)
13255
13256 def action_regen(settings, portdb, max_jobs, max_load):
13257         xterm_titles = "notitles" not in settings.features
13258         emergelog(xterm_titles, " === regen")
13259         # Regenerate cache entries.
13260         portage.writemsg_stdout("Regenerating cache entries...\n")
13261         try:
13262                 os.close(sys.stdin.fileno())
13263         except SystemExit, e:
13264                 raise # Needed else can't exit
13265         except:
13266                 pass
13267         sys.stdout.flush()
13268
13269         regen = MetadataRegen(portdb, max_jobs=max_jobs, max_load=max_load)
13270         regen.run()
13271
13272         portage.writemsg_stdout("done!\n")
13273         return regen.returncode
13274
13275 def action_config(settings, trees, myopts, myfiles):
13276         if len(myfiles) != 1:
13277                 print red("!!! config can only take a single package atom at this time\n")
13278                 sys.exit(1)
13279         if not is_valid_package_atom(myfiles[0]):
13280                 portage.writemsg("!!! '%s' is not a valid package atom.\n" % myfiles[0],
13281                         noiselevel=-1)
13282                 portage.writemsg("!!! Please check ebuild(5) for full details.\n")
13283                 portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
13284                 sys.exit(1)
13285         print
13286         try:
13287                 pkgs = trees[settings["ROOT"]]["vartree"].dbapi.match(myfiles[0])
13288         except portage.exception.AmbiguousPackageName, e:
13289                 # Multiple matches thrown from cpv_expand
13290                 pkgs = e.args[0]
13291         if len(pkgs) == 0:
13292                 print "No packages found.\n"
13293                 sys.exit(0)
13294         elif len(pkgs) > 1:
13295                 if "--ask" in myopts:
13296                         options = []
13297                         print "Please select a package to configure:"
13298                         idx = 0
13299                         for pkg in pkgs:
13300                                 idx += 1
13301                                 options.append(str(idx))
13302                                 print options[-1]+") "+pkg
13303                         print "X) Cancel"
13304                         options.append("X")
13305                         idx = userquery("Selection?", options)
13306                         if idx == "X":
13307                                 sys.exit(0)
13308                         pkg = pkgs[int(idx)-1]
13309                 else:
13310                         print "The following packages are available:"
13311                         for pkg in pkgs:
13312                                 print "* "+pkg
13313                         print "\nPlease use a specific atom or the --ask option."
13314                         sys.exit(1)
13315         else:
13316                 pkg = pkgs[0]
13317
13318         print
13319         if "--ask" in myopts:
13320                 if userquery("Ready to configure "+pkg+"?") == "No":
13321                         sys.exit(0)
13322         else:
13323                 print "Configuring %s..." % pkg
13324         print
13325         ebuildpath = trees[settings["ROOT"]]["vartree"].dbapi.findname(pkg)
13326         mysettings = portage.config(clone=settings)
13327         vardb = trees[mysettings["ROOT"]]["vartree"].dbapi
13328         debug = mysettings.get("PORTAGE_DEBUG") == "1"
13329         retval = portage.doebuild(ebuildpath, "config", mysettings["ROOT"],
13330                 mysettings,
13331                 debug=debug, cleanup=True,
13332                 mydbapi=trees[settings["ROOT"]]["vartree"].dbapi, tree="vartree")
13333         if retval == os.EX_OK:
13334                 portage.doebuild(ebuildpath, "clean", mysettings["ROOT"],
13335                         mysettings, debug=debug, mydbapi=vardb, tree="vartree")
13336         print
13337
13338 def action_info(settings, trees, myopts, myfiles):
13339         print getportageversion(settings["PORTDIR"], settings["ROOT"],
13340                 settings.profile_path, settings["CHOST"],
13341                 trees[settings["ROOT"]]["vartree"].dbapi)
13342         header_width = 65
13343         header_title = "System Settings"
13344         if myfiles:
13345                 print header_width * "="
13346                 print header_title.rjust(int(header_width/2 + len(header_title)/2))
13347         print header_width * "="
13348         print "System uname: "+platform.platform(aliased=1)
13349
13350         lastSync = portage.grabfile(os.path.join(
13351                 settings["PORTDIR"], "metadata", "timestamp.chk"))
13352         print "Timestamp of tree:",
13353         if lastSync:
13354                 print lastSync[0]
13355         else:
13356                 print "Unknown"
13357
13358         output=commands.getstatusoutput("distcc --version")
13359         if not output[0]:
13360                 print str(output[1].split("\n",1)[0]),
13361                 if "distcc" in settings.features:
13362                         print "[enabled]"
13363                 else:
13364                         print "[disabled]"
13365
13366         output=commands.getstatusoutput("ccache -V")
13367         if not output[0]:
13368                 print str(output[1].split("\n",1)[0]),
13369                 if "ccache" in settings.features:
13370                         print "[enabled]"
13371                 else:
13372                         print "[disabled]"
13373
13374         myvars  = ["sys-devel/autoconf", "sys-devel/automake", "virtual/os-headers",
13375                    "sys-devel/binutils", "sys-devel/libtool",  "dev-lang/python"]
13376         myvars += portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_pkgs")
13377         myvars  = portage.util.unique_array(myvars)
13378         myvars.sort()
13379
13380         for x in myvars:
13381                 if portage.isvalidatom(x):
13382                         pkg_matches = trees["/"]["vartree"].dbapi.match(x)
13383                         pkg_matches = [portage.catpkgsplit(cpv)[1:] for cpv in pkg_matches]
13384                         pkg_matches.sort(key=cmp_sort_key(portage.pkgcmp))
13385                         pkgs = []
13386                         for pn, ver, rev in pkg_matches:
13387                                 if rev != "r0":
13388                                         pkgs.append(ver + "-" + rev)
13389                                 else:
13390                                         pkgs.append(ver)
13391                         if pkgs:
13392                                 pkgs = ", ".join(pkgs)
13393                                 print "%-20s %s" % (x+":", pkgs)
13394                 else:
13395                         print "%-20s %s" % (x+":", "[NOT VALID]")
13396
13397         libtool_vers = ",".join(trees["/"]["vartree"].dbapi.match("sys-devel/libtool"))
13398
13399         if "--verbose" in myopts:
13400                 myvars=settings.keys()
13401         else:
13402                 myvars = ['GENTOO_MIRRORS', 'CONFIG_PROTECT', 'CONFIG_PROTECT_MASK',
13403                           'PORTDIR', 'DISTDIR', 'PKGDIR', 'PORTAGE_TMPDIR',
13404                           'PORTDIR_OVERLAY', 'USE', 'CHOST', 'CFLAGS', 'CXXFLAGS',
13405                           'ACCEPT_KEYWORDS', 'SYNC', 'FEATURES', 'EMERGE_DEFAULT_OPTS']
13406
13407                 myvars.extend(portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_vars"))
13408
13409         myvars = portage.util.unique_array(myvars)
13410         unset_vars = []
13411         myvars.sort()
13412         for x in myvars:
13413                 if x in settings:
13414                         if x != "USE":
13415                                 print '%s="%s"' % (x, settings[x])
13416                         else:
13417                                 use = set(settings["USE"].split())
13418                                 use_expand = settings["USE_EXPAND"].split()
13419                                 use_expand.sort()
13420                                 for varname in use_expand:
13421                                         flag_prefix = varname.lower() + "_"
13422                                         for f in list(use):
13423                                                 if f.startswith(flag_prefix):
13424                                                         use.remove(f)
13425                                 use = list(use)
13426                                 use.sort()
13427                                 print 'USE="%s"' % " ".join(use),
13428                                 for varname in use_expand:
13429                                         myval = settings.get(varname)
13430                                         if myval:
13431                                                 print '%s="%s"' % (varname, myval),
13432                                 print
13433                 else:
13434                         unset_vars.append(x)
13435         if unset_vars:
13436                 print "Unset:  "+", ".join(unset_vars)
13437         print
13438
13439         if "--debug" in myopts:
13440                 for x in dir(portage):
13441                         module = getattr(portage, x)
13442                         if "cvs_id_string" in dir(module):
13443                                 print "%s: %s" % (str(x), str(module.cvs_id_string))
13444
13445         # See if we can find any packages installed matching the strings
13446         # passed on the command line
13447         mypkgs = []
13448         vardb = trees[settings["ROOT"]]["vartree"].dbapi
13449         portdb = trees[settings["ROOT"]]["porttree"].dbapi
13450         for x in myfiles:
13451                 mypkgs.extend(vardb.match(x))
13452
13453         # If some packages were found...
13454         if mypkgs:
13455                 # Get our global settings (we only print stuff if it varies from
13456                 # the current config)
13457                 mydesiredvars = [ 'CHOST', 'CFLAGS', 'CXXFLAGS', 'LDFLAGS' ]
13458                 auxkeys = mydesiredvars + [ "USE", "IUSE"]
13459                 global_vals = {}
13460                 pkgsettings = portage.config(clone=settings)
13461
13462                 for myvar in mydesiredvars:
13463                         global_vals[myvar] = set(settings.get(myvar, "").split())
13464
13465                 # Loop through each package
13466                 # Only print settings if they differ from global settings
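                      # For example (hypothetical values): a package built with
                      # CFLAGS="-O3" while the current global CFLAGS is "-O2 -pipe"
                      # would get a CFLAGS="-O3" line in the report below.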
13467                 header_title = "Package Settings"
13468                 print header_width * "="
13469                 print header_title.rjust(int(header_width/2 + len(header_title)/2))
13470                 print header_width * "="
13471                 from portage.output import EOutput
13472                 out = EOutput()
13473                 for pkg in mypkgs:
13474                         # Get all package specific variables
13475                         auxvalues = vardb.aux_get(pkg, auxkeys)
13476                         valuesmap = {}
13477                         for i in xrange(len(auxkeys)):
13478                                 valuesmap[auxkeys[i]] = set(auxvalues[i].split())
13479                         diff_values = {}
13480                         for myvar in mydesiredvars:
13481                                 # If the package variable doesn't match the
13482                                 # current global variable, something has changed
13483                                 # so record it in diff_values so we know to print
13484                                 if valuesmap[myvar] != global_vals[myvar]:
13485                                         diff_values[myvar] = valuesmap[myvar]
13486                         valuesmap["IUSE"] = set(filter_iuse_defaults(valuesmap["IUSE"]))
13487                         valuesmap["USE"] = valuesmap["USE"].intersection(valuesmap["IUSE"])
13488                         pkgsettings.reset()
13489                         # If a matching ebuild is no longer available in the tree, maybe it
13490                         # would make sense to compare against the flags for the best
13491                         # available version with the same slot?
13492                         mydb = None
13493                         if portdb.cpv_exists(pkg):
13494                                 mydb = portdb
13495                         pkgsettings.setcpv(pkg, mydb=mydb)
13496                         if valuesmap["IUSE"].intersection(
13497                                 pkgsettings["PORTAGE_USE"].split()) != valuesmap["USE"]:
13498                                 diff_values["USE"] = valuesmap["USE"]
13499                         # If a difference was found, print the info for
13500                         # this package.
13501                         if diff_values:
13502                                 # Print package info
13503                                 print "%s was built with the following:" % pkg
13504                                 for myvar in mydesiredvars + ["USE"]:
13505                                         if myvar in diff_values:
13506                                                 mylist = list(diff_values[myvar])
13507                                                 mylist.sort()
13508                                                 print "%s=\"%s\"" % (myvar, " ".join(mylist))
13509                                 print
13510                         print ">>> Attempting to run pkg_info() for '%s'" % pkg
13511                         ebuildpath = vardb.findname(pkg)
13512                         if not ebuildpath or not os.path.exists(ebuildpath):
13513                                 out.ewarn("No ebuild found for '%s'" % pkg)
13514                                 continue
13515                         portage.doebuild(ebuildpath, "info", pkgsettings["ROOT"],
13516                 pkgsettings, debug=(settings.get("PORTAGE_DEBUG", "") == "1"),
13517                                 mydbapi=trees[settings["ROOT"]]["vartree"].dbapi,
13518                                 tree="vartree")
13519
13520 def action_search(root_config, myopts, myfiles, spinner):
13521         if not myfiles:
13522                 print "emerge: no search terms provided."
13523         else:
13524                 searchinstance = search(root_config,
13525                         spinner, "--searchdesc" in myopts,
13526                         "--quiet" not in myopts, "--usepkg" in myopts,
13527                         "--usepkgonly" in myopts)
13528                 for mysearch in myfiles:
13529                         try:
13530                                 searchinstance.execute(mysearch)
13531                         except re.error, comment:
13532                                 print "\n!!! Regular expression error in \"%s\": %s" % (mysearch, comment)
13533                                 sys.exit(1)
13534                         searchinstance.output()
13535
13536 def action_depclean(settings, trees, ldpath_mtimes,
13537         myopts, action, myfiles, spinner):
13538         # Remove packages that are neither explicitly merged (world) nor
13539         # required as a dependency of another package. The world file is explicit.
13540
13541         # Global depclean or prune operations are not very safe when there are
13542         # missing dependencies since it's unknown how badly incomplete
13543         # the dependency graph is, and we might accidentally remove packages
13544         # that should have been pulled into the graph. On the other hand, it's
13545         # relatively safe to ignore missing deps when only asked to remove
13546         # specific packages.
13547         allow_missing_deps = len(myfiles) > 0
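              # In other words, a bare `emerge --depclean`/`--prune` insists on a fully
              # resolvable dependency graph, while an invocation that names specific
              # atoms tolerates unresolved deps elsewhere in the graph.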
13548
13549         msg = []
13550         msg.append("Always study the list of packages to be cleaned for any obvious\n")
13551         msg.append("mistakes. Packages that are part of the world set will always\n")
13552         msg.append("be kept.  They can be manually added to this set with\n")
13553         msg.append(good("`emerge --noreplace <atom>`") + ".  Packages that are listed in\n")
13554         msg.append("package.provided (see portage(5)) will be removed by\n")
13555         msg.append("depclean, even if they are part of the world set.\n")
13556         msg.append("\n")
13557         msg.append("As a safety measure, depclean will not remove any packages\n")
13558         msg.append("unless *all* required dependencies have been resolved.  As a\n")
13559         msg.append("consequence, it is often necessary to run %s\n" % \
13560                 good("`emerge --update"))
13561         msg.append(good("--newuse --deep @system @world`") + \
13562                 " prior to depclean.\n")
13563
13564         if action == "depclean" and "--quiet" not in myopts and not myfiles:
13565                 portage.writemsg_stdout("\n")
13566                 for x in msg:
13567                         portage.writemsg_stdout(colorize("WARN", " * ") + x)
13568
13569         xterm_titles = "notitles" not in settings.features
13570         myroot = settings["ROOT"]
13571         root_config = trees[myroot]["root_config"]
13572         getSetAtoms = root_config.setconfig.getSetAtoms
13573         vardb = trees[myroot]["vartree"].dbapi
13574
13575         required_set_names = ("system", "world")
13576         required_sets = {}
13577         set_args = []
13578
13579         for s in required_set_names:
13580                 required_sets[s] = InternalPackageSet(
13581                         initial_atoms=getSetAtoms(s))
13582
13583         
13584         # When removing packages, use a temporary version of world
13585         # which excludes packages that are intended to be eligible for
13586         # removal.
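              # For depclean-with-arguments and for prune, this set is cleared and
              # refilled below with atoms for every installed package that must be
              # kept; the resolver's _complete_graph() call then pulls in the
              # dependencies of everything it contains.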
13587         world_temp_set = required_sets["world"]
13588         system_set = required_sets["system"]
13589
13590         if not system_set or not world_temp_set:
13591
13592                 if not system_set:
13593                         writemsg_level("!!! You have no system list.\n",
13594                                 level=logging.ERROR, noiselevel=-1)
13595
13596                 if not world_temp_set:
13597                         writemsg_level("!!! You have no world file.\n",
13598                                         level=logging.WARNING, noiselevel=-1)
13599
13600                 writemsg_level("!!! Proceeding is likely to " + \
13601                         "break your installation.\n",
13602                         level=logging.WARNING, noiselevel=-1)
13603                 if "--pretend" not in myopts:
13604                         countdown(int(settings["EMERGE_WARNING_DELAY"]), ">>> Depclean")
13605
13606         if action == "depclean":
13607                 emergelog(xterm_titles, " >>> depclean")
13608
13609         import textwrap
13610         args_set = InternalPackageSet()
13611         if myfiles:
13612                 for x in myfiles:
13613                         if not is_valid_package_atom(x):
13614                                 writemsg_level("!!! '%s' is not a valid package atom.\n" % x,
13615                                         level=logging.ERROR, noiselevel=-1)
13616                                 writemsg_level("!!! Please check ebuild(5) for full details.\n")
13617                                 return
13618                         try:
13619                                 atom = portage.dep_expand(x, mydb=vardb, settings=settings)
13620                         except portage.exception.AmbiguousPackageName, e:
13621                                 msg = "The short ebuild name \"" + x + \
13622                                         "\" is ambiguous.  Please specify " + \
13623                                         "one of the following " + \
13624                                         "fully-qualified ebuild names instead:"
13625                                 for line in textwrap.wrap(msg, 70):
13626                                         writemsg_level("!!! %s\n" % (line,),
13627                                                 level=logging.ERROR, noiselevel=-1)
13628                                 for i in e[0]:
13629                                         writemsg_level("    %s\n" % colorize("INFORM", i),
13630                                                 level=logging.ERROR, noiselevel=-1)
13631                                 writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
13632                                 return
13633                         args_set.add(atom)
13634                 matched_packages = False
13635                 for x in args_set:
13636                         if vardb.match(x):
13637                                 matched_packages = True
13638                                 break
13639                 if not matched_packages:
13640                         writemsg_level(">>> No packages selected for removal by %s\n" % \
13641                                 action)
13642                         return
13643
13644         writemsg_level("\nCalculating dependencies  ")
13645         resolver_params = create_depgraph_params(myopts, "remove")
13646         resolver = depgraph(settings, trees, myopts, resolver_params, spinner)
13647         vardb = resolver.trees[myroot]["vartree"].dbapi
13648
13649         if action == "depclean":
13650
13651                 if args_set:
13652                         # Pull in everything that's installed but not matched
13653                         # by an argument atom since we don't want to clean any
13654                         # package if something depends on it.
13655
13656                         world_temp_set.clear()
13657                         for pkg in vardb:
13658                                 spinner.update()
13659
13660                                 try:
13661                                         if args_set.findAtomForPackage(pkg) is None:
13662                                                 world_temp_set.add("=" + pkg.cpv)
13663                                                 continue
13664                                 except portage.exception.InvalidDependString, e:
13665                                         show_invalid_depstring_notice(pkg,
13666                                                 pkg.metadata["PROVIDE"], str(e))
13667                                         del e
13668                                         world_temp_set.add("=" + pkg.cpv)
13669                                         continue
13670
13671         elif action == "prune":
13672
13673                 # Pull in everything that's installed since we don't want
13674                 # to prune a package if something depends on it.
13675                 world_temp_set.clear()
13676                 world_temp_set.update(vardb.cp_all())
13677
13678                 if not args_set:
13679
13680                         # Try to prune everything that's slotted.
13681                         for cp in vardb.cp_all():
13682                                 if len(vardb.cp_list(cp)) > 1:
13683                                         args_set.add(cp)
13684
13685                 # Remove atoms from world that match installed packages
13686                 # that are also matched by argument atoms, but do not remove
13687                 # them if they match the highest installed version.
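                      # Concretely (hypothetical example): with foo-1.0 and foo-2.0 both
                      # installed and matched, foo-2.0 is re-added to the temporary world
                      # set below and only foo-1.0 remains a candidate for pruning.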
13688                 for pkg in vardb:
13689                         spinner.update()
13690                         pkgs_for_cp = vardb.match_pkgs(pkg.cp)
13691                         if not pkgs_for_cp or pkg not in pkgs_for_cp:
13692                                 raise AssertionError("package expected in matches: " + \
13693                                         "cp = %s, cpv = %s matches = %s" % \
13694                                         (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
13695
13696                         highest_version = pkgs_for_cp[-1]
13697                         if pkg == highest_version:
13698                                 # pkg is the highest version
13699                                 world_temp_set.add("=" + pkg.cpv)
13700                                 continue
13701
13702                         if len(pkgs_for_cp) <= 1:
13703                                 raise AssertionError("more packages expected: " + \
13704                                         "cp = %s, cpv = %s matches = %s" % \
13705                                         (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
13706
13707                         try:
13708                                 if args_set.findAtomForPackage(pkg) is None:
13709                                         world_temp_set.add("=" + pkg.cpv)
13710                                         continue
13711                         except portage.exception.InvalidDependString, e:
13712                                 show_invalid_depstring_notice(pkg,
13713                                         pkg.metadata["PROVIDE"], str(e))
13714                                 del e
13715                                 world_temp_set.add("=" + pkg.cpv)
13716                                 continue
13717
13718         set_args = {}
13719         for s, package_set in required_sets.iteritems():
13720                 set_atom = SETPREFIX + s
13721                 set_arg = SetArg(arg=set_atom, set=package_set,
13722                         root_config=resolver.roots[myroot])
13723                 set_args[s] = set_arg
13724                 for atom in set_arg.set:
13725                         resolver._dep_stack.append(
13726                                 Dependency(atom=atom, root=myroot, parent=set_arg))
13727                         resolver.digraph.add(set_arg, None)
13728
13729         success = resolver._complete_graph()
13730         writemsg_level("\b\b... done!\n")
13731
13732         resolver.display_problems()
13733
13734         if not success:
13735                 return 1
13736
13737         def unresolved_deps():
13738
13739                 unresolvable = set()
13740                 for dep in resolver._initially_unsatisfied_deps:
13741                         if isinstance(dep.parent, Package) and \
13742                                 (dep.priority > UnmergeDepPriority.SOFT):
13743                                 unresolvable.add((dep.atom, dep.parent.cpv))
13744
13745                 if not unresolvable:
13746                         return False
13747
13748                 if unresolvable and not allow_missing_deps:
13749                         prefix = bad(" * ")
13750                         msg = []
13751                         msg.append("Dependencies could not be completely resolved due to")
13752                         msg.append("the following required packages not being installed:")
13753                         msg.append("")
13754                         for atom, parent in unresolvable:
13755                                 msg.append("  %s pulled in by:" % (atom,))
13756                                 msg.append("    %s" % (parent,))
13757                                 msg.append("")
13758                         msg.append("Have you forgotten to run " + \
13759                                 good("`emerge --update --newuse --deep @system @world`") + " prior")
13760                         msg.append(("to %s? It may be necessary to manually " + \
13761                                 "uninstall packages that no longer") % action)
13762                         msg.append("exist in the portage tree since " + \
13763                                 "it may not be possible to satisfy their")
13764                         msg.append("dependencies.  Also, be aware of " + \
13765                                 "the --with-bdeps option that is documented")
13766                         msg.append("in " + good("`man emerge`") + ".")
13767                         if action == "prune":
13768                                 msg.append("")
13769                                 msg.append("If you would like to ignore " + \
13770                                         "dependencies then use %s." % good("--nodeps"))
13771                         writemsg_level("".join("%s%s\n" % (prefix, line) for line in msg),
13772                                 level=logging.ERROR, noiselevel=-1)
13773                         return True
13774                 return False
13775
13776         if unresolved_deps():
13777                 return 1
13778
13779         graph = resolver.digraph.copy()
13780         required_pkgs_total = 0
13781         for node in graph:
13782                 if isinstance(node, Package):
13783                         required_pkgs_total += 1
13784
13785         def show_parents(child_node):
13786                 parent_nodes = graph.parent_nodes(child_node)
13787                 if not parent_nodes:
13788                         # With --prune, the highest version can be pulled in without any
13789                         # real parent since all installed packages are pulled in.  In that
13790                         # case there's nothing to show here.
13791                         return
13792                 parent_strs = []
13793                 for node in parent_nodes:
13794                         parent_strs.append(str(getattr(node, "cpv", node)))
13795                 parent_strs.sort()
13796                 msg = []
13797                 msg.append("  %s pulled in by:\n" % (child_node.cpv,))
13798                 for parent_str in parent_strs:
13799                         msg.append("    %s\n" % (parent_str,))
13800                 msg.append("\n")
13801                 portage.writemsg_stdout("".join(msg), noiselevel=-1)
13802
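              # cmp-style comparator; it is wrapped with cmp_sort_key() below so that
              # it can be passed to sorted() as a key= argument.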
13803         def cmp_pkg_cpv(pkg1, pkg2):
13804                 """Sort Package instances by cpv."""
13805                 if pkg1.cpv > pkg2.cpv:
13806                         return 1
13807                 elif pkg1.cpv == pkg2.cpv:
13808                         return 0
13809                 else:
13810                         return -1
13811
13812         def create_cleanlist():
13813                 pkgs_to_remove = []
13814
13815                 if action == "depclean":
13816                         if args_set:
13817
13818                                 for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)):
13819                                         arg_atom = None
13820                                         try:
13821                                                 arg_atom = args_set.findAtomForPackage(pkg)
13822                                         except portage.exception.InvalidDependString:
13823                                                 # this error has already been displayed by now
13824                                                 continue
13825
13826                                         if arg_atom:
13827                                                 if pkg not in graph:
13828                                                         pkgs_to_remove.append(pkg)
13829                                                 elif "--verbose" in myopts:
13830                                                         show_parents(pkg)
13831
13832                         else:
13833                                 for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)):
13834                                         if pkg not in graph:
13835                                                 pkgs_to_remove.append(pkg)
13836                                         elif "--verbose" in myopts:
13837                                                 show_parents(pkg)
13838
13839                 elif action == "prune":
13840                         # Prune really uses the set of all installed packages instead of
13841                         # world. It's not a real reverse dependency, so don't display it as such.
13842                         graph.remove(set_args["world"])
13843
13844                         for atom in args_set:
13845                                 for pkg in vardb.match_pkgs(atom):
13846                                         if pkg not in graph:
13847                                                 pkgs_to_remove.append(pkg)
13848                                         elif "--verbose" in myopts:
13849                                                 show_parents(pkg)
13850
13851                 if not pkgs_to_remove:
13852                         writemsg_level(
13853                                 ">>> No packages selected for removal by %s\n" % action)
13854                         if "--verbose" not in myopts:
13855                                 writemsg_level(
13856                                         ">>> To see reverse dependencies, use %s\n" % \
13857                                                 good("--verbose"))
13858                         if action == "prune":
13859                                 writemsg_level(
13860                                         ">>> To ignore dependencies, use %s\n" % \
13861                                                 good("--nodeps"))
13862
13863                 return pkgs_to_remove
13864
13865         cleanlist = create_cleanlist()
13866
13867         if len(cleanlist):
13868                 clean_set = set(cleanlist)
13869
13870                 # Check if any of these packages are the sole providers of libraries
13871                 # with consumers that have not been selected for removal. If so, these
13872                 # packages and any dependencies need to be added to the graph.
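                      # Rough outline: for each cleanlist package, collect the library
                      # objects it owns, look up consumers of those libraries that are not
                      # themselves scheduled for removal, and only treat the package as
                      # safely removable if some other provider outside the cleanlist
                      # still satisfies those consumers.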
13873                 real_vardb = trees[myroot]["vartree"].dbapi
13874                 linkmap = real_vardb.linkmap
13875                 liblist = linkmap.listLibraryObjects()
13876                 consumer_cache = {}
13877                 provider_cache = {}
13878                 soname_cache = {}
13879                 consumer_map = {}
13880
13881                 writemsg_level(">>> Checking for lib consumers...\n")
13882
13883                 for pkg in cleanlist:
13884                         pkg_dblink = real_vardb._dblink(pkg.cpv)
13885                         provided_libs = set()
13886
13887                         for lib in liblist:
13888                                 if pkg_dblink.isowner(lib, myroot):
13889                                         provided_libs.add(lib)
13890
13891                         if not provided_libs:
13892                                 continue
13893
13894                         consumers = {}
13895                         for lib in provided_libs:
13896                                 lib_consumers = consumer_cache.get(lib)
13897                                 if lib_consumers is None:
13898                                         lib_consumers = linkmap.findConsumers(lib)
13899                                         consumer_cache[lib] = lib_consumers
13900                                 if lib_consumers:
13901                                         consumers[lib] = lib_consumers
13902
13903                         if not consumers:
13904                                 continue
13905
13906                         for lib, lib_consumers in consumers.items():
13907                                 for consumer_file in list(lib_consumers):
13908                                         if pkg_dblink.isowner(consumer_file, myroot):
13909                                                 lib_consumers.remove(consumer_file)
13910                                 if not lib_consumers:
13911                                         del consumers[lib]
13912
13913                         if not consumers:
13914                                 continue
13915
13916                         for lib, lib_consumers in consumers.iteritems():
13917
13918                                 soname = soname_cache.get(lib)
13919                                 if soname is None:
13920                                         soname = linkmap.getSoname(lib)
13921                                         soname_cache[lib] = soname
13922
13923                                 consumer_providers = []
13924                                 for lib_consumer in lib_consumers:
13925                                         providers = provider_cache.get(lib_consumer)
13926                                         if providers is None:
13927                                                 providers = linkmap.findProviders(lib_consumer)
13928                                                 provider_cache[lib_consumer] = providers
13929                                         if soname not in providers:
13930                                                 # Why does this happen?
13931                                                 continue
13932                                         consumer_providers.append(
13933                                                 (lib_consumer, providers[soname]))
13934
13935                                 consumers[lib] = consumer_providers
13936
13937                         consumer_map[pkg] = consumers
13938
13939                 if consumer_map:
13940
13941                         search_files = set()
13942                         for consumers in consumer_map.itervalues():
13943                                 for lib, consumer_providers in consumers.iteritems():
13944                                         for lib_consumer, providers in consumer_providers:
13945                                                 search_files.add(lib_consumer)
13946                                                 search_files.update(providers)
13947
13948                         writemsg_level(">>> Assigning files to packages...\n")
13949                         file_owners = real_vardb._owners.getFileOwnerMap(search_files)
13950
13951                         for pkg, consumers in consumer_map.items():
13952                                 for lib, consumer_providers in consumers.items():
13953                                         lib_consumers = set()
13954
13955                                         for lib_consumer, providers in consumer_providers:
13956                                                 owner_set = file_owners.get(lib_consumer)
13957                                                 provider_dblinks = set()
13958                                                 provider_pkgs = set()
13959
13960                                                 if len(providers) > 1:
13961                                                         for provider in providers:
13962                                                                 provider_set = file_owners.get(provider)
13963                                                                 if provider_set is not None:
13964                                                                         provider_dblinks.update(provider_set)
13965
13966                                                 if len(provider_dblinks) > 1:
13967                                                         for provider_dblink in provider_dblinks:
13968                                                                 pkg_key = ("installed", myroot,
13969                                                                         provider_dblink.mycpv, "nomerge")
13970                                                                 if pkg_key not in clean_set:
13971                                                                         provider_pkgs.add(vardb.get(pkg_key))
13972
13973                                                 if provider_pkgs:
13974                                                         continue
13975
13976                                                 if owner_set is not None:
13977                                                         lib_consumers.update(owner_set)
13978
13979                                         for consumer_dblink in list(lib_consumers):
13980                                                 if ("installed", myroot, consumer_dblink.mycpv,
13981                                                         "nomerge") in clean_set:
13982                                                         lib_consumers.remove(consumer_dblink)
13983                                                         continue
13984
13985                                         if lib_consumers:
13986                                                 consumers[lib] = lib_consumers
13987                                         else:
13988                                                 del consumers[lib]
13989                                 if not consumers:
13990                                         del consumer_map[pkg]
13991
13992                 if consumer_map:
13993                         # TODO: Implement a package set for rebuilding consumer packages.
13994
13995                         msg = "In order to avoid breakage of link level " + \
13996                                 "dependencies, one or more packages will not be removed. " + \
13997                                 "This can be solved by rebuilding " + \
13998                                 "the packages that pulled them in."
13999
14000                         prefix = bad(" * ")
14001                         from textwrap import wrap
14002                         writemsg_level("".join(prefix + "%s\n" % line for \
14003                                 line in wrap(msg, 70)), level=logging.WARNING, noiselevel=-1)
14004
14005                         msg = []
14006                         for pkg, consumers in consumer_map.iteritems():
14007                                 unique_consumers = set(chain(*consumers.values()))
14008                                 unique_consumers = sorted(consumer.mycpv \
14009                                         for consumer in unique_consumers)
14010                                 msg.append("")
14011                                 msg.append("  %s pulled in by:" % (pkg.cpv,))
14012                                 for consumer in unique_consumers:
14013                                         msg.append("    %s" % (consumer,))
14014                         msg.append("")
14015                         writemsg_level("".join(prefix + "%s\n" % line for line in msg),
14016                                 level=logging.WARNING, noiselevel=-1)
14017
14018                         # Add lib providers to the graph as children of lib consumers,
14019                         # and also add any dependencies pulled in by the provider.
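                              # Each kept provider is added via resolver._add_pkg() with a
                              # runtime UnmergeDepPriority edge from its consumer, and the
                              # _complete_graph() call below then pulls in the provider's own
                              # dependencies so that they are protected as well.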
14020                         writemsg_level(">>> Adding lib providers to graph...\n")
14021
14022                         for pkg, consumers in consumer_map.iteritems():
14023                                 for consumer_dblink in set(chain(*consumers.values())):
14024                                         consumer_pkg = vardb.get(("installed", myroot,
14025                                                 consumer_dblink.mycpv, "nomerge"))
14026                                         if not resolver._add_pkg(pkg,
14027                                                 Dependency(parent=consumer_pkg,
14028                                                 priority=UnmergeDepPriority(runtime=True),
14029                                                 root=pkg.root)):
14030                                                 resolver.display_problems()
14031                                                 return 1
14032
14033                         writemsg_level("\nCalculating dependencies  ")
14034                         success = resolver._complete_graph()
14035                         writemsg_level("\b\b... done!\n")
14036                         resolver.display_problems()
14037                         if not success:
14038                                 return 1
14039                         if unresolved_deps():
14040                                 return 1
14041
14042                         graph = resolver.digraph.copy()
14043                         required_pkgs_total = 0
14044                         for node in graph:
14045                                 if isinstance(node, Package):
14046                                         required_pkgs_total += 1
14047                         cleanlist = create_cleanlist()
14048                         if not cleanlist:
14049                                 return 0
14050                         clean_set = set(cleanlist)
14051
14052                 # Use a topological sort to create an unmerge order such that
14053                 # each package is unmerged before its dependencies. This is
14054                 # necessary to avoid breaking things that may need to run
14055                 # during pkg_prerm or pkg_postrm phases.
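                      # For example (hypothetical names): if app-a RDEPENDs on lib-b and
                      # both are being removed, app-a is unmerged first so that lib-b is
                      # still present while app-a's pkg_prerm/pkg_postrm run.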
14056
14057                 # Create a new graph to account for dependencies between the
14058                 # packages being unmerged.
14059                 graph = digraph()
14060                 del cleanlist[:]
14061
14062                 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
14063                 runtime = UnmergeDepPriority(runtime=True)
14064                 runtime_post = UnmergeDepPriority(runtime_post=True)
14065                 buildtime = UnmergeDepPriority(buildtime=True)
14066                 priority_map = {
14067                         "RDEPEND": runtime,
14068                         "PDEPEND": runtime_post,
14069                         "DEPEND": buildtime,
14070                 }
14071
14072                 for node in clean_set:
14073                         graph.add(node, None)
14074                         mydeps = []
14075                         node_use = node.metadata["USE"].split()
14076                         for dep_type in dep_keys:
14077                                 depstr = node.metadata[dep_type]
14078                                 if not depstr:
14079                                         continue
14080                                 try:
14081                                         portage.dep._dep_check_strict = False
14082                                         success, atoms = portage.dep_check(depstr, None, settings,
14083                                                 myuse=node_use, trees=resolver._graph_trees,
14084                                                 myroot=myroot)
14085                                 finally:
14086                                         portage.dep._dep_check_strict = True
14087                                 if not success:
14088                                         # Ignore invalid deps of packages that will
14089                                         # be uninstalled anyway.
14090                                         continue
14091
14092                                 priority = priority_map[dep_type]
14093                                 for atom in atoms:
14094                                         if not isinstance(atom, portage.dep.Atom):
14095                                                 # Ignore invalid atoms returned from dep_check().
14096                                                 continue
14097                                         if atom.blocker:
14098                                                 continue
14099                                         matches = vardb.match_pkgs(atom)
14100                                         if not matches:
14101                                                 continue
14102                                         for child_node in matches:
14103                                                 if child_node in clean_set:
14104                                                         graph.add(child_node, node, priority=priority)
14105
14106                 ordered = True
14107                 if len(graph.order) == len(graph.root_nodes()):
14108                         # If there are no dependencies between packages
14109                         # let unmerge() group them by cat/pn.
14110                         ordered = False
14111                         cleanlist = [pkg.cpv for pkg in graph.order]
14112                 else:
14113                         # Order nodes from lowest to highest overall reference count for
14114                         # optimal root node selection.
14115                         node_refcounts = {}
14116                         for node in graph.order:
14117                                 node_refcounts[node] = len(graph.parent_nodes(node))
14118                         def cmp_reference_count(node1, node2):
14119                                 return node_refcounts[node1] - node_refcounts[node2]
14120                         graph.order.sort(key=cmp_sort_key(cmp_reference_count))
14121         
14122                         ignore_priority_range = [None]
14123                         ignore_priority_range.extend(
14124                                 xrange(UnmergeDepPriority.MIN, UnmergeDepPriority.MAX + 1))
14125                         while not graph.empty():
14126                                 for ignore_priority in ignore_priority_range:
14127                                         nodes = graph.root_nodes(ignore_priority=ignore_priority)
14128                                         if nodes:
14129                                                 break
14130                                 if not nodes:
14131                                         raise AssertionError("no root nodes")
14132                                 if ignore_priority is not None:
14133                                         # Some deps have been dropped due to circular dependencies,
14134                                         # so only pop one node in order to minimize the number that
14135                                         # are dropped.
14136                                         del nodes[1:]
14137                                 for node in nodes:
14138                                         graph.remove(node)
14139                                         cleanlist.append(node.cpv)
14140
14141                 unmerge(root_config, myopts, "unmerge", cleanlist,
14142                         ldpath_mtimes, ordered=ordered)
14143
14144         if action == "prune":
14145                 return
14146
14147         if not cleanlist and "--quiet" in myopts:
14148                 return
14149
14150         print "Packages installed:   "+str(len(vardb.cpv_all()))
14151         print "Packages in world:    " + \
14152                 str(len(root_config.sets["world"].getAtoms()))
14153         print "Packages in system:   " + \
14154                 str(len(root_config.sets["system"].getAtoms()))
14155         print "Required packages:    "+str(required_pkgs_total)
14156         if "--pretend" in myopts:
14157                 print "Number to remove:     "+str(len(cleanlist))
14158         else:
14159                 print "Number removed:       "+str(len(cleanlist))
14160
14161 def resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
14162         """
14163         Construct a depgraph for the given resume list. This will raise
14164         PackageNotFound or depgraph.UnsatisfiedResumeDep when necessary.
14165         @rtype: tuple
14166         @returns: (success, depgraph, dropped_tasks)
14167         """
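              # loadResumeCommand() may raise UnsatisfiedResumeDep; since skip_unsatisfied
              # is set, the handler below prunes the offending mergelist entries (and any
              # parents that would become unsatisfied by dropping them) and retries.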
14168         skip_masked = True
14169         skip_unsatisfied = True
14170         mergelist = mtimedb["resume"]["mergelist"]
14171         dropped_tasks = set()
14172         while True:
14173                 mydepgraph = depgraph(settings, trees,
14174                         myopts, myparams, spinner)
14175                 try:
14176                         success = mydepgraph.loadResumeCommand(mtimedb["resume"],
14177                                 skip_masked=skip_masked)
14178                 except depgraph.UnsatisfiedResumeDep, e:
14179                         if not skip_unsatisfied:
14180                                 raise
14181
14182                         graph = mydepgraph.digraph
14183                         unsatisfied_parents = dict((dep.parent, dep.parent) \
14184                                 for dep in e.value)
14185                         traversed_nodes = set()
14186                         unsatisfied_stack = list(unsatisfied_parents)
14187                         while unsatisfied_stack:
14188                                 pkg = unsatisfied_stack.pop()
14189                                 if pkg in traversed_nodes:
14190                                         continue
14191                                 traversed_nodes.add(pkg)
14192
14193                                 # If this package was pulled in by a parent
14194                                 # package scheduled for merge, removing this
14195                                 # package may cause the parent package's
14196                                 # dependency to become unsatisfied.
14197                                 for parent_node in graph.parent_nodes(pkg):
14198                                         if not isinstance(parent_node, Package) \
14199                                                 or parent_node.operation not in ("merge", "nomerge"):
14200                                                 continue
14201                                         unsatisfied = \
14202                                                 graph.child_nodes(parent_node,
14203                                                 ignore_priority=DepPrioritySatisfiedRange.ignore_soft)
14204                                         if pkg in unsatisfied:
14205                                                 unsatisfied_parents[parent_node] = parent_node
14206                                                 unsatisfied_stack.append(parent_node)
14207
14208                         pruned_mergelist = []
14209                         for x in mergelist:
14210                                 if isinstance(x, list) and \
14211                                         tuple(x) not in unsatisfied_parents:
14212                                         pruned_mergelist.append(x)
14213
14214                         # If the mergelist doesn't shrink then this loop is infinite.
14215                         if len(pruned_mergelist) == len(mergelist):
14216                                 # This happens if a package can't be dropped because
14217                                 # it's already installed, but it has unsatisfied PDEPEND.
14218                                 raise
14219                         mergelist[:] = pruned_mergelist
14220
14221                         # Exclude installed packages that have been removed from the graph due
14222                         # to failure to build/install runtime dependencies after the dependent
14223                         # package has already been installed.
14224                         dropped_tasks.update(pkg for pkg in \
14225                                 unsatisfied_parents if pkg.operation != "nomerge")
14226                         mydepgraph.break_refs(unsatisfied_parents)
14227
14228                         del e, graph, traversed_nodes, \
14229                                 unsatisfied_parents, unsatisfied_stack
14230                         continue
14231                 else:
14232                         break
14233         return (success, mydepgraph, dropped_tasks)
14234
14235 def action_build(settings, trees, mtimedb,
14236         myopts, myaction, myfiles, spinner):
14237
14238         # validate the state of the resume data
14239         # so that we can make assumptions later.
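              # Roughly: a usable resume entry is a dict whose "mergelist" is a list of
              # (pkg_type, pkg_root, pkg_key, pkg_action) items with roots matching the
              # configured trees, whose "myopts" is a dict or list, and whose "favorites"
              # is a list; anything else is deleted from mtimedb here.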
14240         for k in ("resume", "resume_backup"):
14241                 if k not in mtimedb:
14242                         continue
14243                 resume_data = mtimedb[k]
14244                 if not isinstance(resume_data, dict):
14245                         del mtimedb[k]
14246                         continue
14247                 mergelist = resume_data.get("mergelist")
14248                 if not isinstance(mergelist, list):
14249                         del mtimedb[k]
14250                         continue
14251                 for x in mergelist:
14252                         if not (isinstance(x, list) and len(x) == 4):
14253                                 continue
14254                         pkg_type, pkg_root, pkg_key, pkg_action = x
14255                         if pkg_root not in trees:
14256                                 # Current $ROOT setting differs,
14257                                 # so the list must be stale.
14258                                 mergelist = None
14259                                 break
14260                 if not mergelist:
14261                         del mtimedb[k]
14262                         continue
14263                 resume_opts = resume_data.get("myopts")
14264                 if not isinstance(resume_opts, (dict, list)):
14265                         del mtimedb[k]
14266                         continue
14267                 favorites = resume_data.get("favorites")
14268                 if not isinstance(favorites, list):
14269                         del mtimedb[k]
14270                         continue
14271
14272         resume = False
14273         if "--resume" in myopts and \
14274                 ("resume" in mtimedb or
14275                 "resume_backup" in mtimedb):
14276                 resume = True
14277                 if "resume" not in mtimedb:
14278                         mtimedb["resume"] = mtimedb["resume_backup"]
14279                         del mtimedb["resume_backup"]
14280                         mtimedb.commit()
14281                 # "myopts" may be stored as a list, for backward compatibility.
14282                 resume_opts = mtimedb["resume"].get("myopts", [])
14283                 if isinstance(resume_opts, list):
14284                         resume_opts = dict((k,True) for k in resume_opts)
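		# These options are dropped rather than inherited from the saved
		# resume data, presumably because they describe the current
		# interactive invocation rather than the one being resumed.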
14285                 for opt in ("--ask", "--color", "--skipfirst", "--tree"):
14286                         resume_opts.pop(opt, None)
14287                 myopts.update(resume_opts)
14288
14289                 if "--debug" in myopts:
14290                         writemsg_level("myopts %s\n" % (myopts,))
14291
14292                 # Adjust config according to options of the command being resumed.
14293                 for myroot in trees:
14294                         mysettings =  trees[myroot]["vartree"].settings
14295                         mysettings.unlock()
14296                         adjust_config(myopts, mysettings)
14297                         mysettings.lock()
14298                         del myroot, mysettings
14299
14300         ldpath_mtimes = mtimedb["ldpath"]
14301         favorites=[]
14302         merge_count = 0
14303         buildpkgonly = "--buildpkgonly" in myopts
14304         pretend = "--pretend" in myopts
14305         fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
14306         ask = "--ask" in myopts
14307         nodeps = "--nodeps" in myopts
14308         oneshot = "--oneshot" in myopts or "--onlydeps" in myopts
14309         tree = "--tree" in myopts
14310         if nodeps and tree:
14311                 tree = False
14312                 del myopts["--tree"]
14313                 portage.writemsg(colorize("WARN", " * ") + \
14314                         "--tree is broken with --nodeps. Disabling...\n")
14315         debug = "--debug" in myopts
14316         verbose = "--verbose" in myopts
14317         quiet = "--quiet" in myopts
14318         if pretend or fetchonly:
14319                 # make the mtimedb readonly
14320                 mtimedb.filename = None
14321         if '--digest' in myopts or 'digest' in settings.features:
14322                 if '--digest' in myopts:
14323                         msg = "The --digest option"
14324                 else:
14325                         msg = "The FEATURES=digest setting"
14326
14327                 msg += " can prevent corruption from being" + \
14328                         " noticed. The `repoman manifest` command is the preferred" + \
14329                         " way to generate manifests and it is capable of doing an" + \
14330                         " entire repository or category at once."
14331                 prefix = bad(" * ")
14332                 writemsg(prefix + "\n")
14333                 from textwrap import wrap
14334                 for line in wrap(msg, 72):
14335                         writemsg("%s%s\n" % (prefix, line))
14336                 writemsg(prefix + "\n")
14337
14338         if "--quiet" not in myopts and \
14339                 ("--pretend" in myopts or "--ask" in myopts or \
14340                 "--tree" in myopts or "--verbose" in myopts):
14341                 action = ""
14342                 if "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
14343                         action = "fetched"
14344                 elif "--buildpkgonly" in myopts:
14345                         action = "built"
14346                 else:
14347                         action = "merged"
14348                 if "--tree" in myopts and action != "fetched": # Tree doesn't work with fetching
14349                         print
14350                         print darkgreen("These are the packages that would be %s, in reverse order:") % action
14351                         print
14352                 else:
14353                         print
14354                         print darkgreen("These are the packages that would be %s, in order:") % action
14355                         print
14356
14357         show_spinner = "--quiet" not in myopts and "--nodeps" not in myopts
14358         if not show_spinner:
14359                 spinner.update = spinner.update_quiet
14360
14361         if resume:
14362                 favorites = mtimedb["resume"].get("favorites")
14363                 if not isinstance(favorites, list):
14364                         favorites = []
14365
14366                 if show_spinner:
14367                         print "Calculating dependencies  ",
14368                 myparams = create_depgraph_params(myopts, myaction)
14369
14370                 resume_data = mtimedb["resume"]
14371                 mergelist = resume_data["mergelist"]
14372                 if mergelist and "--skipfirst" in myopts:
14373                         for i, task in enumerate(mergelist):
14374                                 if isinstance(task, list) and \
14375                                         task and task[-1] == "merge":
14376                                         del mergelist[i]
14377                                         break
14378
14379                 success = False
14380                 mydepgraph = None
14381                 try:
14382                         success, mydepgraph, dropped_tasks = resume_depgraph(
14383                                 settings, trees, mtimedb, myopts, myparams, spinner)
14384                 except (portage.exception.PackageNotFound,
14385                         depgraph.UnsatisfiedResumeDep), e:
14386                         if isinstance(e, depgraph.UnsatisfiedResumeDep):
14387                                 mydepgraph = e.depgraph
14388                         if show_spinner:
14389                                 print
14390                         from textwrap import wrap
14391                         from portage.output import EOutput
14392                         out = EOutput()
14393
14394                         resume_data = mtimedb["resume"]
14395                         mergelist = resume_data.get("mergelist")
14396                         if not isinstance(mergelist, list):
14397                                 mergelist = []
14398                         if mergelist and debug or (verbose and not quiet):
14399                                 out.eerror("Invalid resume list:")
14400                                 out.eerror("")
14401                                 indent = "  "
14402                                 for task in mergelist:
14403                                         if isinstance(task, list):
14404                                                 out.eerror(indent + str(tuple(task)))
14405                                 out.eerror("")
14406
14407                         if isinstance(e, depgraph.UnsatisfiedResumeDep):
14408                                 out.eerror("One or more packages are either masked or " + \
14409                                         "have missing dependencies:")
14410                                 out.eerror("")
14411                                 indent = "  "
14412                                 for dep in e.value:
14413                                         if dep.atom is None:
14414                                                 out.eerror(indent + "Masked package:")
14415                                                 out.eerror(2 * indent + str(dep.parent))
14416                                                 out.eerror("")
14417                                         else:
14418                                                 out.eerror(indent + str(dep.atom) + " pulled in by:")
14419                                                 out.eerror(2 * indent + str(dep.parent))
14420                                                 out.eerror("")
14421                                 msg = "The resume list contains packages " + \
14422                                         "that are either masked or have " + \
14423                                         "unsatisfied dependencies. " + \
14424                                         "Please restart/continue " + \
14425                                         "the operation manually, or use --skipfirst " + \
14426                                         "to skip the first package in the list and " + \
14427                                         "any other packages that may be " + \
14428                                         "masked or have missing dependencies."
14429                                 for line in wrap(msg, 72):
14430                                         out.eerror(line)
14431                         elif isinstance(e, portage.exception.PackageNotFound):
14432                                 out.eerror("An expected package is " + \
14433                                         "not available: %s" % str(e))
14434                                 out.eerror("")
14435                                 msg = "The resume list contains one or more " + \
14436                                         "packages that are no longer " + \
14437                                         "available. Please restart/continue " + \
14438                                         "the operation manually."
14439                                 for line in wrap(msg, 72):
14440                                         out.eerror(line)
14441                 else:
14442                         if show_spinner:
14443                                 print "\b\b... done!"
14444
14445                 if success:
14446                         if dropped_tasks:
14447                                 portage.writemsg("!!! One or more packages have been " + \
14448                                         "dropped due to\n" + \
14449                                         "!!! masking or unsatisfied dependencies:\n\n",
14450                                         noiselevel=-1)
14451                                 for task in dropped_tasks:
14452                                         portage.writemsg("  " + str(task) + "\n", noiselevel=-1)
14453                                 portage.writemsg("\n", noiselevel=-1)
14454                         del dropped_tasks
14455                 else:
14456                         if mydepgraph is not None:
14457                                 mydepgraph.display_problems()
14458                         if not (ask or pretend):
14459                                 # delete the current list and also the backup
14460                                 # since it's probably stale too.
14461                                 for k in ("resume", "resume_backup"):
14462                                         mtimedb.pop(k, None)
14463                                 mtimedb.commit()
14464
14465                         return 1
14466         else:
14467                 if ("--resume" in myopts):
14468                         print darkgreen("emerge: It seems we have nothing to resume...")
14469                         return os.EX_OK
14470
14471                 myparams = create_depgraph_params(myopts, myaction)
14472                 if "--quiet" not in myopts and "--nodeps" not in myopts:
14473                         print "Calculating dependencies  ",
14474                         sys.stdout.flush()
14475                 mydepgraph = depgraph(settings, trees, myopts, myparams, spinner)
14476                 try:
14477                         retval, favorites = mydepgraph.select_files(myfiles)
14478                 except portage.exception.PackageNotFound, e:
14479                         portage.writemsg("\n!!! %s\n" % str(e), noiselevel=-1)
14480                         return 1
14481                 except portage.exception.PackageSetNotFound, e:
14482                         root_config = trees[settings["ROOT"]]["root_config"]
14483                         display_missing_pkg_set(root_config, e.value)
14484                         return 1
14485                 if show_spinner:
14486                         print "\b\b... done!"
14487                 if not retval:
14488                         mydepgraph.display_problems()
14489                         return 1
14490
14491         if "--pretend" not in myopts and \
14492                 ("--ask" in myopts or "--tree" in myopts or \
14493                 "--verbose" in myopts) and \
14494                 not ("--quiet" in myopts and "--ask" not in myopts):
14495                 if "--resume" in myopts:
14496                         mymergelist = mydepgraph.altlist()
14497                         if len(mymergelist) == 0:
14498                                 print colorize("INFORM", "emerge: It seems we have nothing to resume...")
14499                                 return os.EX_OK
14500                         favorites = mtimedb["resume"]["favorites"]
14501                         retval = mydepgraph.display(
14502                                 mydepgraph.altlist(reversed=tree),
14503                                 favorites=favorites)
14504                         mydepgraph.display_problems()
14505                         if retval != os.EX_OK:
14506                                 return retval
14507                         prompt="Would you like to resume merging these packages?"
14508                 else:
14509                         retval = mydepgraph.display(
14510                                 mydepgraph.altlist(reversed=("--tree" in myopts)),
14511                                 favorites=favorites)
14512                         mydepgraph.display_problems()
14513                         if retval != os.EX_OK:
14514                                 return retval
14515                         mergecount=0
14516                         for x in mydepgraph.altlist():
14517                                 if isinstance(x, Package) and x.operation == "merge":
14518                                         mergecount += 1
14519
14520                         if mergecount==0:
14521                                 sets = trees[settings["ROOT"]]["root_config"].sets
14522                                 world_candidates = None
14523                                 if "--noreplace" in myopts and \
14524                                         not oneshot and favorites:
14525                                         # Sets that are not world candidates are filtered
14526                                         # out here since the favorites list needs to be
14527                                         # complete for depgraph.loadResumeCommand() to
14528                                         # operate correctly.
14529                                         world_candidates = [x for x in favorites \
14530                                                 if not (x.startswith(SETPREFIX) and \
14531                                                 not sets[x[1:]].world_candidate)]
14532                                 if "--noreplace" in myopts and \
14533                                         not oneshot and world_candidates:
14534                                         print
14535                                         for x in world_candidates:
14536                                                 print " %s %s" % (good("*"), x)
14537                                         prompt="Would you like to add these packages to your world favorites?"
14538                                 elif settings["AUTOCLEAN"] and "yes"==settings["AUTOCLEAN"]:
14539                                         prompt="Nothing to merge; would you like to auto-clean packages?"
14540                                 else:
14541                                         print
14542                                         print "Nothing to merge; quitting."
14543                                         print
14544                                         return os.EX_OK
14545                         elif "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
14546                                 prompt="Would you like to fetch the source files for these packages?"
14547                         else:
14548                                 prompt="Would you like to merge these packages?"
14549                 print
14550                 if "--ask" in myopts and userquery(prompt) == "No":
14551                         print
14552                         print "Quitting."
14553                         print
14554                         return os.EX_OK
14555                 # Don't ask again (e.g. when auto-cleaning packages after merge)
14556                 myopts.pop("--ask", None)
14557
14558         if ("--pretend" in myopts) and not ("--fetchonly" in myopts or "--fetch-all-uri" in myopts):
14559                 if ("--resume" in myopts):
14560                         mymergelist = mydepgraph.altlist()
14561                         if len(mymergelist) == 0:
14562                                 print colorize("INFORM", "emerge: It seems we have nothing to resume...")
14563                                 return os.EX_OK
14564                         favorites = mtimedb["resume"]["favorites"]
14565                         retval = mydepgraph.display(
14566                                 mydepgraph.altlist(reversed=tree),
14567                                 favorites=favorites)
14568                         mydepgraph.display_problems()
14569                         if retval != os.EX_OK:
14570                                 return retval
14571                 else:
14572                         retval = mydepgraph.display(
14573                                 mydepgraph.altlist(reversed=("--tree" in myopts)),
14574                                 favorites=favorites)
14575                         mydepgraph.display_problems()
14576                         if retval != os.EX_OK:
14577                                 return retval
14578                         if "--buildpkgonly" in myopts:
14579                                 graph_copy = mydepgraph.digraph.clone()
14580                                 removed_nodes = set()
14581                                 for node in graph_copy:
14582                                         if not isinstance(node, Package) or \
14583                                                 node.operation == "nomerge":
14584                                                 removed_nodes.add(node)
14585                                 graph_copy.difference_update(removed_nodes)
14586                                 if not graph_copy.hasallzeros(ignore_priority = \
14587                                         DepPrioritySatisfiedRange.ignore_medium):
14588                                         print "\n!!! --buildpkgonly requires all dependencies to be merged."
14589                                         print "!!! You have to merge the dependencies before you can build this package.\n"
14590                                         return 1
14591         else:
14592                 if "--buildpkgonly" in myopts:
14593                         graph_copy = mydepgraph.digraph.clone()
14594                         removed_nodes = set()
14595                         for node in graph_copy:
14596                                 if not isinstance(node, Package) or \
14597                                         node.operation == "nomerge":
14598                                         removed_nodes.add(node)
14599                         graph_copy.difference_update(removed_nodes)
14600                         if not graph_copy.hasallzeros(ignore_priority = \
14601                                 DepPrioritySatisfiedRange.ignore_medium):
14602                                 print "\n!!! --buildpkgonly requires all dependencies to be merged."
14603                                 print "!!! Cannot merge requested packages. Merge deps and try again.\n"
14604                                 return 1
14605
14606                 if ("--resume" in myopts):
14607                         favorites=mtimedb["resume"]["favorites"]
14608                         mymergelist = mydepgraph.altlist()
14609                         mydepgraph.break_refs(mymergelist)
14610                         mergetask = Scheduler(settings, trees, mtimedb, myopts,
14611                                 spinner, mymergelist, favorites, mydepgraph.schedulerGraph())
14612                         del mydepgraph, mymergelist
14613                         clear_caches(trees)
14614
14615                         retval = mergetask.merge()
14616                         merge_count = mergetask.curval
14617                 else:
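			# Back up any existing resume list before it is replaced below
			# (apparently only worth keeping when more than one entry remains).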
14618                         if "resume" in mtimedb and \
14619                         "mergelist" in mtimedb["resume"] and \
14620                         len(mtimedb["resume"]["mergelist"]) > 1:
14621                                 mtimedb["resume_backup"] = mtimedb["resume"]
14622                                 del mtimedb["resume"]
14623                                 mtimedb.commit()
14624                         mtimedb["resume"]={}
14625                         # Stored as a dict starting with portage-2.1.6_rc1, and supported
14626                         # by >=portage-2.1.3_rc8. Versions <portage-2.1.3_rc8 only support
14627                         # a list type for options.
14628                         mtimedb["resume"]["myopts"] = myopts.copy()
14629
14630                         # Convert Atom instances to plain str.
14631                         mtimedb["resume"]["favorites"] = [str(x) for x in favorites]
14632
14633                         pkglist = mydepgraph.altlist()
14634                         mydepgraph.saveNomergeFavorites()
14635                         mydepgraph.break_refs(pkglist)
14636                         mergetask = Scheduler(settings, trees, mtimedb, myopts,
14637                                 spinner, pkglist, favorites, mydepgraph.schedulerGraph())
14638                         del mydepgraph, pkglist
14639                         clear_caches(trees)
14640
14641                         retval = mergetask.merge()
14642                         merge_count = mergetask.curval
14643
14644                 if retval == os.EX_OK and not (buildpkgonly or fetchonly or pretend):
14645                         if "yes" == settings.get("AUTOCLEAN"):
14646                                 portage.writemsg_stdout(">>> Auto-cleaning packages...\n")
14647                                 unmerge(trees[settings["ROOT"]]["root_config"],
14648                                         myopts, "clean", [],
14649                                         ldpath_mtimes, autoclean=1)
14650                         else:
14651                                 portage.writemsg_stdout(colorize("WARN", "WARNING:")
14652                                         + " AUTOCLEAN is disabled.  This can cause serious"
14653                                         + " problems due to overlapping packages.\n")
14654                         trees[settings["ROOT"]]["vartree"].dbapi.plib_registry.pruneNonExisting()
14655
14656                 return retval
14657
14658 def multiple_actions(action1, action2):
14659         sys.stderr.write("\n!!! Multiple actions requested... Please choose one only.\n")
14660         sys.stderr.write("!!! '%s' or '%s'\n\n" % (action1, action2))
14661         sys.exit(1)
14662
14663 def insert_optional_args(args):
14664         """
14665         Parse optional arguments and insert a value if one has
14666         not been provided. This is done before feeding the args
14667         to the optparse parser since that parser does not support
14668         this feature natively.
14669         """
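	# Illustrative transformations (derived from the handling below):
	#   ["-j4", "world"]  -> ["--jobs", "4", "world"]
	#   ["-j", "world"]   -> ["--jobs", "True", "world"]
	#   ["-vj", "world"]  -> ["--jobs", "True", "-v", "world"]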
14670
14671         new_args = []
14672         jobs_opts = ("-j", "--jobs")
14673         arg_stack = args[:]
14674         arg_stack.reverse()
14675         while arg_stack:
14676                 arg = arg_stack.pop()
14677
14678                 short_job_opt = bool("j" in arg and arg[:1] == "-" and arg[:2] != "--")
14679                 if not (short_job_opt or arg in jobs_opts):
14680                         new_args.append(arg)
14681                         continue
14682
14683                                 # Insert a placeholder value if none was provided,
14684                                 # in order to satisfy the requirements of optparse.
14685
14686                 new_args.append("--jobs")
14687                 job_count = None
14688                 saved_opts = None
14689                 if short_job_opt and len(arg) > 2:
14690                         if arg[:2] == "-j":
14691                                 try:
14692                                         job_count = int(arg[2:])
14693                                 except ValueError:
14694                                         saved_opts = arg[2:]
14695                         else:
14696                                 job_count = "True"
14697                                 saved_opts = arg[1:].replace("j", "")
14698
14699                 if job_count is None and arg_stack:
14700                         try:
14701                                 job_count = int(arg_stack[-1])
14702                         except ValueError:
14703                                 pass
14704                         else:
14705                                 # Discard the job count from the stack
14706                                 # since we're consuming it here.
14707                                 arg_stack.pop()
14708
14709                 if job_count is None:
14710                         # unlimited number of jobs
14711                         new_args.append("True")
14712                 else:
14713                         new_args.append(str(job_count))
14714
14715                 if saved_opts is not None:
14716                         new_args.append("-" + saved_opts)
14717
14718         return new_args
14719
14720 def parse_opts(tmpcmdline, silent=False):
14721         myaction=None
14722         myopts = {}
14723         myfiles=[]
14724
14725         global actions, options, shortmapping
14726
14727         longopt_aliases = {"--cols":"--columns", "--skip-first":"--skipfirst"}
14728         argument_options = {
14729                 "--config-root": {
14730                         "help":"specify the location for portage configuration files",
14731                         "action":"store"
14732                 },
14733                 "--color": {
14734                         "help":"enable or disable color output",
14735                         "type":"choice",
14736                         "choices":("y", "n")
14737                 },
14738
14739                 "--jobs": {
14740
14741                         "help"   : "Specifies the number of packages to build " + \
14742                                 "simultaneously.",
14743
14744                         "action" : "store"
14745                 },
14746
14747                 "--load-average": {
14748
14749                         "help"   :"Specifies that no new builds should be started " + \
14750                                 "if there are other builds running and the load average " + \
14751                                 "is at least LOAD (a floating-point number).",
14752
14753                         "action" : "store"
14754                 },
14755
14756                 "--with-bdeps": {
14757                         "help":"include unnecessary build time dependencies",
14758                         "type":"choice",
14759                         "choices":("y", "n")
14760                 },
14761                 "--reinstall": {
14762                         "help":"specify conditions to trigger package reinstallation",
14763                         "type":"choice",
14764                         "choices":["changed-use"]
14765                 }
14766         }
14767
14768         from optparse import OptionParser
14769         parser = OptionParser()
14770         if parser.has_option("--help"):
14771                 parser.remove_option("--help")
14772
14773         for action_opt in actions:
14774                 parser.add_option("--" + action_opt, action="store_true",
14775                         dest=action_opt.replace("-", "_"), default=False)
14776         for myopt in options:
14777                 parser.add_option(myopt, action="store_true",
14778                         dest=myopt.lstrip("--").replace("-", "_"), default=False)
14779         for shortopt, longopt in shortmapping.iteritems():
14780                 parser.add_option("-" + shortopt, action="store_true",
14781                         dest=longopt.lstrip("--").replace("-", "_"), default=False)
14782         for myalias, myopt in longopt_aliases.iteritems():
14783                 parser.add_option(myalias, action="store_true",
14784                         dest=myopt.lstrip("--").replace("-", "_"), default=False)
14785
14786         for myopt, kwargs in argument_options.iteritems():
14787                 parser.add_option(myopt,
14788                         dest=myopt.lstrip("--").replace("-", "_"), **kwargs)
14789
14790         tmpcmdline = insert_optional_args(tmpcmdline)
14791
14792         myoptions, myargs = parser.parse_args(args=tmpcmdline)
14793
14794         if myoptions.jobs:
14795                 jobs = None
14796                 if myoptions.jobs == "True":
14797                         jobs = True
14798                 else:
14799                         try:
14800                                 jobs = int(myoptions.jobs)
14801                         except ValueError:
14802                                 jobs = -1
14803
14804                 if jobs is not True and \
14805                         jobs < 1:
14806                         jobs = None
14807                         if not silent:
14808                                 writemsg("!!! Invalid --jobs parameter: '%s'\n" % \
14809                                         (myoptions.jobs,), noiselevel=-1)
14810
14811                 myoptions.jobs = jobs
14812
14813         if myoptions.load_average:
14814                 try:
14815                         load_average = float(myoptions.load_average)
14816                 except ValueError:
14817                         load_average = 0.0
14818
14819                 if load_average <= 0.0:
14820                         load_average = None
14821                         if not silent:
14822                                 writemsg("!!! Invalid --load-average parameter: '%s'\n" % \
14823                                         (myoptions.load_average,), noiselevel=-1)
14824
14825                 myoptions.load_average = load_average
14826
14827         for myopt in options:
14828                 v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"))
14829                 if v:
14830                         myopts[myopt] = True
14831
14832         for myopt in argument_options:
14833                 v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"), None)
14834                 if v is not None:
14835                         myopts[myopt] = v
14836
14837         if myoptions.searchdesc:
14838                 myoptions.search = True
14839
14840         for action_opt in actions:
14841                 v = getattr(myoptions, action_opt.replace("-", "_"))
14842                 if v:
14843                         if myaction:
14844                                 multiple_actions(myaction, action_opt)
14845                                 sys.exit(1)
14846                         myaction = action_opt
14847
14848         myfiles += myargs
14849
14850         return myaction, myopts, myfiles
14851
14852 def validate_ebuild_environment(trees):
14853         for myroot in trees:
14854                 settings = trees[myroot]["vartree"].settings
14855                 settings.validate()
14856
14857 def clear_caches(trees):
14858         for d in trees.itervalues():
14859                 d["porttree"].dbapi.melt()
14860                 d["porttree"].dbapi._aux_cache.clear()
14861                 d["bintree"].dbapi._aux_cache.clear()
14862                 d["bintree"].dbapi._clear_cache()
14863                 d["vartree"].dbapi.linkmap._clear_cache()
14864         portage.dircache.clear()
14865         gc.collect()
14866
14867 def load_emerge_config(trees=None):
14868         kwargs = {}
14869         for k, envvar in (("config_root", "PORTAGE_CONFIGROOT"), ("target_root", "ROOT")):
14870                 v = os.environ.get(envvar, None)
14871                 if v and v.strip():
14872                         kwargs[k] = v
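	# For example, ROOT="/mnt/gentoo" in the environment (an illustrative
	# value) becomes the "target_root" keyword argument here, so the trees
	# are created for that alternate root.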
14873         trees = portage.create_trees(trees=trees, **kwargs)
14874
14875         for root, root_trees in trees.iteritems():
14876                 settings = root_trees["vartree"].settings
14877                 setconfig = load_default_config(settings, root_trees)
14878                 root_trees["root_config"] = RootConfig(settings, root_trees, setconfig)
14879
14880         settings = trees["/"]["vartree"].settings
14881
14882         for myroot in trees:
14883                 if myroot != "/":
14884                         settings = trees[myroot]["vartree"].settings
14885                         break
14886
14887         mtimedbfile = os.path.join("/", portage.CACHE_PATH.lstrip(os.path.sep), "mtimedb")
14888         mtimedb = portage.MtimeDB(mtimedbfile)
14889         
14890         return settings, trees, mtimedb
14891
14892 def adjust_config(myopts, settings):
14893         """Make emerge specific adjustments to the config."""
14894
14895         # To enhance usability, make some vars case insensitive by forcing them to
14896         # lower case.
14897         for myvar in ("AUTOCLEAN", "NOCOLOR"):
14898                 if myvar in settings:
14899                         settings[myvar] = settings[myvar].lower()
14900                         settings.backup_changes(myvar)
14901         del myvar
14902
14903         # Kill noauto as it will break merges otherwise.
14904         if "noauto" in settings.features:
14905                 while "noauto" in settings.features:
14906                         settings.features.remove("noauto")
14907                 settings["FEATURES"] = " ".join(settings.features)
14908                 settings.backup_changes("FEATURES")
14909
14910         CLEAN_DELAY = 5
14911         try:
14912                 CLEAN_DELAY = int(settings.get("CLEAN_DELAY", str(CLEAN_DELAY)))
14913         except ValueError, e:
14914                 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
14915                 portage.writemsg("!!! Unable to parse integer: CLEAN_DELAY='%s'\n" % \
14916                         settings["CLEAN_DELAY"], noiselevel=-1)
14917         settings["CLEAN_DELAY"] = str(CLEAN_DELAY)
14918         settings.backup_changes("CLEAN_DELAY")
14919
14920         EMERGE_WARNING_DELAY = 10
14921         try:
14922                 EMERGE_WARNING_DELAY = int(settings.get(
14923                         "EMERGE_WARNING_DELAY", str(EMERGE_WARNING_DELAY)))
14924         except ValueError, e:
14925                 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
14926                 portage.writemsg("!!! Unable to parse integer: EMERGE_WARNING_DELAY='%s'\n" % \
14927                         settings["EMERGE_WARNING_DELAY"], noiselevel=-1)
14928         settings["EMERGE_WARNING_DELAY"] = str(EMERGE_WARNING_DELAY)
14929         settings.backup_changes("EMERGE_WARNING_DELAY")
14930
14931         if "--quiet" in myopts:
14932                 settings["PORTAGE_QUIET"]="1"
14933                 settings.backup_changes("PORTAGE_QUIET")
14934
14935         if "--verbose" in myopts:
14936                 settings["PORTAGE_VERBOSE"] = "1"
14937                 settings.backup_changes("PORTAGE_VERBOSE")
14938
14939         # Set so that configs will be merged regardless of remembered status
14940         if ("--noconfmem" in myopts):
14941                 settings["NOCONFMEM"]="1"
14942                 settings.backup_changes("NOCONFMEM")
14943
14944         # Set various debug markers... They should be merged somehow.
14945         PORTAGE_DEBUG = 0
14946         try:
14947                 PORTAGE_DEBUG = int(settings.get("PORTAGE_DEBUG", str(PORTAGE_DEBUG)))
14948                 if PORTAGE_DEBUG not in (0, 1):
14949                         portage.writemsg("!!! Invalid value: PORTAGE_DEBUG='%i'\n" % \
14950                                 PORTAGE_DEBUG, noiselevel=-1)
14951                         portage.writemsg("!!! PORTAGE_DEBUG must be either 0 or 1\n",
14952                                 noiselevel=-1)
14953                         PORTAGE_DEBUG = 0
14954         except ValueError, e:
14955                 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
14956                 portage.writemsg("!!! Unable to parse integer: PORTAGE_DEBUG='%s'\n" %\
14957                         settings["PORTAGE_DEBUG"], noiselevel=-1)
14958                 del e
14959         if "--debug" in myopts:
14960                 PORTAGE_DEBUG = 1
14961         settings["PORTAGE_DEBUG"] = str(PORTAGE_DEBUG)
14962         settings.backup_changes("PORTAGE_DEBUG")
14963
14964         if settings.get("NOCOLOR") not in ("yes","true"):
14965                 portage.output.havecolor = 1
14966
14967         # The explicit --color < y | n > option overrides the NOCOLOR environment
14968         # variable and stdout auto-detection.
14969         if "--color" in myopts:
14970                 if "y" == myopts["--color"]:
14971                         portage.output.havecolor = 1
14972                         settings["NOCOLOR"] = "false"
14973                 else:
14974                         portage.output.havecolor = 0
14975                         settings["NOCOLOR"] = "true"
14976                 settings.backup_changes("NOCOLOR")
14977         elif not sys.stdout.isatty() and settings.get("NOCOLOR") != "no":
14978                 portage.output.havecolor = 0
14979                 settings["NOCOLOR"] = "true"
14980                 settings.backup_changes("NOCOLOR")
14981
14982 def apply_priorities(settings):
14983         ionice(settings)
14984         nice(settings)
14985
14986 def nice(settings):
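	# os.nice() applies a relative increment, so e.g. PORTAGE_NICENESS="3"
	# (an illustrative value) lowers the priority of emerge and the processes
	# it spawns by 3.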
14987         try:
14988                 os.nice(int(settings.get("PORTAGE_NICENESS", "0")))
14989         except (OSError, ValueError), e:
14990                 out = portage.output.EOutput()
14991                 out.eerror("Failed to change nice value to '%s'" % \
14992                         settings["PORTAGE_NICENESS"])
14993                 out.eerror("%s\n" % str(e))
14994
14995 def ionice(settings):
14996
14997         ionice_cmd = settings.get("PORTAGE_IONICE_COMMAND")
14998         if ionice_cmd:
14999                 ionice_cmd = shlex.split(ionice_cmd)
15000         if not ionice_cmd:
15001                 return
15002
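	# ${PID} references in PORTAGE_IONICE_COMMAND are expanded to emerge's own
	# pid below, e.g. a make.conf entry such as:
	#   PORTAGE_IONICE_COMMAND="ionice -c 3 -p \${PID}"
	# (an illustrative value).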
15003         from portage.util import varexpand
15004         variables = {"PID" : str(os.getpid())}
15005         cmd = [varexpand(x, mydict=variables) for x in ionice_cmd]
15006
15007         try:
15008                 rval = portage.process.spawn(cmd, env=os.environ)
15009         except portage.exception.CommandNotFound:
15010                 # The ionice command is not available, most likely because
15011                 # the OS/kernel doesn't support it, so return silently.
15012                 return
15013
15014         if rval != os.EX_OK:
15015                 out = portage.output.EOutput()
15016                 out.eerror("PORTAGE_IONICE_COMMAND returned %d" % (rval,))
15017                 out.eerror("See the make.conf(5) man page for PORTAGE_IONICE_COMMAND usage instructions.")
15018
15019 def display_missing_pkg_set(root_config, set_name):
15020
15021         msg = []
15022         msg.append(("emerge: There are no sets to satisfy '%s'. " + \
15023                 "The following sets exist:") % \
15024                 colorize("INFORM", set_name))
15025         msg.append("")
15026
15027         for s in sorted(root_config.sets):
15028                 msg.append("    %s" % s)
15029         msg.append("")
15030
15031         writemsg_level("".join("%s\n" % l for l in msg),
15032                 level=logging.ERROR, noiselevel=-1)
15033
15034 def expand_set_arguments(myfiles, myaction, root_config):
15035         retval = os.EX_OK
15036         setconfig = root_config.setconfig
15037
15038         sets = setconfig.getSets()
15039
15040         # In order to know exactly which atoms/sets should be added to the
15041         # world file, the depgraph performs set expansion later. It will get
15042         # confused about where the atoms came from if it's not allowed to
15043         # expand them itself.
15044         do_not_expand = (None, )
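	# (None is the default merge action, so for a plain 'emerge @<set>' the
	# set reference is passed through unexpanded and resolved by the depgraph.)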
15045         newargs = []
15046         for a in myfiles:
15047                 if a in ("system", "world"):
15048                         newargs.append(SETPREFIX+a)
15049                 else:
15050                         newargs.append(a)
15051         myfiles = newargs
15052         del newargs
15053         newargs = []
15054
15055         # separators for set arguments
15056         ARG_START = "{"
15057         ARG_END = "}"
15058
15059         # WARNING: all operators must be of equal length
15060         IS_OPERATOR = "/@"
15061         DIFF_OPERATOR = "-@"
15062         UNION_OPERATOR = "+@"
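	# Illustrative usage (hypothetical set names): "@setA-@setB" is the
	# difference, "@setA/@setB" the intersection, "@setA+@setB" the union,
	# and "@setname{key=value,flag}" passes extra options to that set's config.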
15063         
15064         for i in range(0, len(myfiles)):
15065                 if myfiles[i].startswith(SETPREFIX):
15066                         start = 0
15067                         end = 0
15068                         x = myfiles[i][len(SETPREFIX):]
15069                         newset = ""
15070                         while x:
15071                                 start = x.find(ARG_START)
15072                                 end = x.find(ARG_END)
15073                                 if start > 0 and start < end:
15074                                         namepart = x[:start]
15075                                         argpart = x[start+1:end]
15076                                 
15077                                         # TODO: implement proper quoting
15078                                         args = argpart.split(",")
15079                                         options = {}
15080                                         for a in args:
15081                                                 if "=" in a:
15082                                                         k, v  = a.split("=", 1)
15083                                                         options[k] = v
15084                                                 else:
15085                                                         options[a] = "True"
15086                                         setconfig.update(namepart, options)
15087                                         newset += (x[:start-len(namepart)]+namepart)
15088                                         x = x[end+len(ARG_END):]
15089                                 else:
15090                                         newset += x
15091                                         x = ""
15092                         myfiles[i] = SETPREFIX+newset
15093                                 
15094         sets = setconfig.getSets()
15095
15096         # display errors that occurred while loading the SetConfig instance
15097         for e in setconfig.errors:
15098                 print colorize("BAD", "Error during set creation: %s" % e)
15099         
15100         # emerge relies on the existence of sets with names "world" and "system"
15101         required_sets = ("world", "system")
15102         missing_sets = []
15103
15104         for s in required_sets:
15105                 if s not in sets:
15106                         missing_sets.append(s)
15107         if missing_sets:
15108                 if len(missing_sets) > 2:
15109                         missing_sets_str = ", ".join('"%s"' % s for s in missing_sets[:-1])
15110                         missing_sets_str += ', and "%s"' % missing_sets[-1]
15111                 elif len(missing_sets) == 2:
15112                         missing_sets_str = '"%s" and "%s"' % tuple(missing_sets)
15113                 else:
15114                         missing_sets_str = '"%s"' % missing_sets[-1]
15115                 msg = ["emerge: incomplete set configuration, " + \
15116                         "missing set(s): %s" % missing_sets_str]
15117                 if sets:
15118                         msg.append("        sets defined: %s" % ", ".join(sets))
15119                 msg.append("        This usually means that '%s'" % \
15120                         (os.path.join(portage.const.GLOBAL_CONFIG_PATH, "sets.conf"),))
15121                 msg.append("        is missing or corrupt.")
15122                 for line in msg:
15123                         writemsg_level(line + "\n", level=logging.ERROR, noiselevel=-1)
15124                 return (None, 1)
15125         unmerge_actions = ("unmerge", "prune", "clean", "depclean")
15126
15127         for a in myfiles:
15128                 if a.startswith(SETPREFIX):
15129                         # support simple set operations (intersection, difference and union)
15130                         # on the commandline. Expressions are evaluated strictly left-to-right
15131                         if IS_OPERATOR in a or DIFF_OPERATOR in a or UNION_OPERATOR in a:
15132                                 expression = a[len(SETPREFIX):]
15133                                 expr_sets = []
15134                                 expr_ops = []
15135                                 while IS_OPERATOR in expression or DIFF_OPERATOR in expression or UNION_OPERATOR in expression:
15136                                         is_pos = expression.rfind(IS_OPERATOR)
15137                                         diff_pos = expression.rfind(DIFF_OPERATOR)
15138                                         union_pos = expression.rfind(UNION_OPERATOR)
15139                                         op_pos = max(is_pos, diff_pos, union_pos)
15140                                         s1 = expression[:op_pos]
15141                                         s2 = expression[op_pos+len(IS_OPERATOR):]
15142                                         op = expression[op_pos:op_pos+len(IS_OPERATOR)]
15143                                         if not s2 in sets:
15144                                                 display_missing_pkg_set(root_config, s2)
15145                                                 return (None, 1)
15146                                         expr_sets.insert(0, s2)
15147                                         expr_ops.insert(0, op)
15148                                         expression = s1
15149                                 if not expression in sets:
15150                                         display_missing_pkg_set(root_config, expression)
15151                                         return (None, 1)
15152                                 expr_sets.insert(0, expression)
15153                                 result = set(setconfig.getSetAtoms(expression))
15154                                 for i in range(0, len(expr_ops)):
15155                                         s2 = setconfig.getSetAtoms(expr_sets[i+1])
15156                                         if expr_ops[i] == IS_OPERATOR:
15157                                                 result.intersection_update(s2)
15158                                         elif expr_ops[i] == DIFF_OPERATOR:
15159                                                 result.difference_update(s2)
15160                                         elif expr_ops[i] == UNION_OPERATOR:
15161                                                 result.update(s2)
15162                                         else:
15163                                                 raise NotImplementedError("unknown set operator %s" % expr_ops[i])
15164                                 newargs.extend(result)
15165                         else:
15166                                 s = a[len(SETPREFIX):]
15167                                 if s not in sets:
15168                                         display_missing_pkg_set(root_config, s)
15169                                         return (None, 1)
15170                                 setconfig.active.append(s)
15171                                 try:
15172                                         set_atoms = setconfig.getSetAtoms(s)
15173                                 except portage.exception.PackageSetNotFound, e:
15174                                         writemsg_level(("emerge: the given set '%s' " + \
15175                                                 "contains a non-existent set named '%s'.\n") % \
15176                                                 (s, e), level=logging.ERROR, noiselevel=-1)
15177                                         return (None, 1)
15178                                 if myaction in unmerge_actions and \
15179                                                 not sets[s].supportsOperation("unmerge"):
15180                                         sys.stderr.write("emerge: the given set '%s' does " % s + \
15181                                                 "not support unmerge operations\n")
15182                                         retval = 1
15183                                 elif not set_atoms:
15184                                         print "emerge: '%s' is an empty set" % s
15185                                 elif myaction not in do_not_expand:
15186                                         newargs.extend(set_atoms)
15187                                 else:
15188                                         newargs.append(SETPREFIX+s)
15189                                 for e in sets[s].errors:
15190                                         print e
15191                 else:
15192                         newargs.append(a)
15193         return (newargs, retval)
15194
15195 def repo_name_check(trees):
15196         missing_repo_names = set()
15197         for root, root_trees in trees.iteritems():
15198                 if "porttree" in root_trees:
15199                         portdb = root_trees["porttree"].dbapi
15200                         missing_repo_names.update(portdb.porttrees)
15201                         repos = portdb.getRepositories()
15202                         for r in repos:
15203                                 missing_repo_names.discard(portdb.getRepositoryPath(r))
15204                         if portdb.porttree_root in missing_repo_names and \
15205                                 not os.path.exists(os.path.join(
15206                                 portdb.porttree_root, "profiles")):
15207                                 # This is normal if $PORTDIR happens to be empty,
15208                                 # so don't warn about it.
15209                                 missing_repo_names.remove(portdb.porttree_root)
15210
15211         if missing_repo_names:
15212                 msg = []
15213                 msg.append("WARNING: One or more repositories " + \
15214                         "have missing repo_name entries:")
15215                 msg.append("")
15216                 for p in missing_repo_names:
15217                         msg.append("\t%s/profiles/repo_name" % (p,))
15218                 msg.append("")
15219                 msg.extend(textwrap.wrap("NOTE: Each repo_name entry " + \
15220                         "should be a plain text file containing a unique " + \
15221                         "name for the repository on the first line.", 70))
15222                 writemsg_level("".join("%s\n" % l for l in msg),
15223                         level=logging.WARNING, noiselevel=-1)
15224
15225         return bool(missing_repo_names)
15226
15227 def config_protect_check(trees):
15228         for root, root_trees in trees.iteritems():
15229                 if not root_trees["root_config"].settings.get("CONFIG_PROTECT"):
15230                         msg = "!!! CONFIG_PROTECT is empty"
15231                         if root != "/":
15232                                 msg += " for '%s'" % root
15233                         writemsg_level(msg, level=logging.WARN, noiselevel=-1)
15234
15235 def ambiguous_package_name(arg, atoms, root_config, spinner, myopts):
15236
15237         if "--quiet" in myopts:
15238                 print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
15239                 print "!!! one of the following fully-qualified ebuild names instead:\n"
15240                 for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
15241                         print "    " + colorize("INFORM", cp)
15242                 return
15243
15244         s = search(root_config, spinner, "--searchdesc" in myopts,
15245                 "--quiet" not in myopts, "--usepkg" in myopts,
15246                 "--usepkgonly" in myopts)
15247         null_cp = portage.dep_getkey(insert_category_into_atom(
15248                 arg, "null"))
15249         cat, atom_pn = portage.catsplit(null_cp)
15250         s.searchkey = atom_pn
15251         for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
15252                 s.addCP(cp)
15253         s.output()
15254         print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
15255         print "!!! one of the above fully-qualified ebuild names instead.\n"
15256
15257 def profile_check(trees, myaction, myopts):
15258         if myaction in ("info", "sync"):
15259                 return os.EX_OK
15260         elif "--version" in myopts or "--help" in myopts:
15261                 return os.EX_OK
15262         for root, root_trees in trees.iteritems():
15263                 if root_trees["root_config"].settings.profiles:
15264                         continue
15265                 # generate some profile related warning messages
15266                 validate_ebuild_environment(trees)
15267                 msg = "If you have just changed your profile configuration, you " + \
15268                         "should revert back to the previous configuration. Due to " + \
15269                         "your current profile being invalid, allowed actions are " + \
15270                         "limited to --help, --info, --sync, and --version."
15271                 writemsg_level("".join("!!! %s\n" % l for l in textwrap.wrap(msg, 70)),
15272                         level=logging.ERROR, noiselevel=-1)
15273                 return 1
15274         return os.EX_OK
15275
15276 def emerge_main():
15277         global portage  # NFC why this is necessary now - genone
15278         portage._disable_legacy_globals()
15279         # Disable color until we're sure that it should be enabled (after
15280         # EMERGE_DEFAULT_OPTS has been parsed).
15281         portage.output.havecolor = 0
15282         # This first pass is just for options that need to be known as early as
15283         # possible, such as --config-root.  They will be parsed again later,
15284         # together with EMERGE_DEFAULT_OPTS (which may vary depending on
15285         # the value of --config-root).
15286         myaction, myopts, myfiles = parse_opts(sys.argv[1:], silent=True)
15287         if "--debug" in myopts:
15288                 os.environ["PORTAGE_DEBUG"] = "1"
15289         if "--config-root" in myopts:
15290                 os.environ["PORTAGE_CONFIGROOT"] = myopts["--config-root"]
15291
15292         # Portage needs to ensure a sane umask for the files it creates.
15293         os.umask(022)
15294         settings, trees, mtimedb = load_emerge_config()
15295         portdb = trees[settings["ROOT"]]["porttree"].dbapi
15296         rval = profile_check(trees, myaction, myopts)
15297         if rval != os.EX_OK:
15298                 return rval
15299
15300         if portage._global_updates(trees, mtimedb["updates"]):
15301                 mtimedb.commit()
15302                 # Reload the whole config from scratch.
15303                 settings, trees, mtimedb = load_emerge_config(trees=trees)
15304                 portdb = trees[settings["ROOT"]]["porttree"].dbapi
15305
15306         xterm_titles = "notitles" not in settings.features
15307
15308         tmpcmdline = []
15309         if "--ignore-default-opts" not in myopts:
15310                 tmpcmdline.extend(settings["EMERGE_DEFAULT_OPTS"].split())
15311         tmpcmdline.extend(sys.argv[1:])
15312         myaction, myopts, myfiles = parse_opts(tmpcmdline)
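              # Illustrative example (hypothetical values): with
              # EMERGE_DEFAULT_OPTS="--ask --verbose" and a command line of
              # `emerge --quiet foo`, tmpcmdline becomes
              # ["--ask", "--verbose", "--quiet", "foo"], so options given on the
              # command line are parsed after (and can override) the defaults.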
15313
15314         if "--digest" in myopts:
15315                 os.environ["FEATURES"] = os.environ.get("FEATURES","") + " digest"
15316                 # Reload the whole config from scratch so that the portdbapi internal
15317                 # config is updated with new FEATURES.
15318                 settings, trees, mtimedb = load_emerge_config(trees=trees)
15319                 portdb = trees[settings["ROOT"]]["porttree"].dbapi
15320
15321         for myroot in trees:
15322                 mysettings = trees[myroot]["vartree"].settings
15323                 mysettings.unlock()
15324                 adjust_config(myopts, mysettings)
15325                 if '--pretend' not in myopts and myaction in \
15326                         (None, 'clean', 'depclean', 'prune', 'unmerge'):
15327                         mysettings["PORTAGE_COUNTER_HASH"] = \
15328                                 trees[myroot]["vartree"].dbapi._counter_hash()
15329                         mysettings.backup_changes("PORTAGE_COUNTER_HASH")
15330                 mysettings.lock()
15331                 del myroot, mysettings
15332
15333         apply_priorities(settings)
15334
15335         spinner = stdout_spinner()
15336         if "candy" in settings.features:
15337                 spinner.update = spinner.update_scroll
15338
15339         if "--quiet" not in myopts:
15340                 portage.deprecated_profile_check(settings=settings)
15341                 repo_name_check(trees)
15342                 config_protect_check(trees)
15343
15344         eclasses_overridden = {}
15345         for mytrees in trees.itervalues():
15346                 mydb = mytrees["porttree"].dbapi
15347                 # Freeze the portdbapi for performance (memoize all xmatch results).
15348                 mydb.freeze()
15349                 eclasses_overridden.update(mydb.eclassdb._master_eclasses_overridden)
15350         del mytrees, mydb
15351
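              # Judging from the format string used below, eclasses_overridden maps
              # each overridden eclass name to the overlay providing it, so a
              # hypothetical entry {"eutils": "/usr/local/portage"} would be
              # reported as '/usr/local/portage/eutils.eclass'.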
15352         if eclasses_overridden and \
15353                 settings.get("PORTAGE_ECLASS_WARNING_ENABLE") != "0":
15354                 prefix = bad(" * ")
15355                 if len(eclasses_overridden) == 1:
15356                         writemsg(prefix + "Overlay eclass overrides " + \
15357                                 "eclass from PORTDIR:\n", noiselevel=-1)
15358                 else:
15359                         writemsg(prefix + "Overlay eclasses override " + \
15360                                 "eclasses from PORTDIR:\n", noiselevel=-1)
15361                 writemsg(prefix + "\n", noiselevel=-1)
15362                 for eclass_name in sorted(eclasses_overridden):
15363                         writemsg(prefix + "  '%s/%s.eclass'\n" % \
15364                                 (eclasses_overridden[eclass_name], eclass_name),
15365                                 noiselevel=-1)
15366                 writemsg(prefix + "\n", noiselevel=-1)
15367                 msg = "It is best to avoid overriding eclasses from PORTDIR " + \
15368                         "because it will trigger invalidation of cached ebuild metadata " + \
15369                         "that is distributed with the portage tree. If you must " + \
15370                         "override eclasses from PORTDIR then you are advised to add " + \
15371                         "FEATURES=\"metadata-transfer\" to /etc/make.conf and to run " + \
15372                         "`emerge --regen` after each time that you run `emerge --sync`. " + \
15373                         "Set PORTAGE_ECLASS_WARNING_ENABLE=\"0\" in /etc/make.conf if " + \
15374                         "you would like to disable this warning."
15375                 from textwrap import wrap
15376                 for line in wrap(msg, 72):
15377                         writemsg("%s%s\n" % (prefix, line), noiselevel=-1)
15378
15379         if "moo" in myfiles:
15380                 print """
15381
15382   Larry loves Gentoo (""" + platform.system() + """)
15383
15384  _______________________
15385 < Have you mooed today? >
15386  -----------------------
15387         \   ^__^
15388          \  (oo)\_______
15389             (__)\       )\/\ 
15390                 ||----w |
15391                 ||     ||
15392
15393 """
15394
15395         for x in myfiles:
15396                 ext = os.path.splitext(x)[1]
15397                 if (ext == ".ebuild" or ext == ".tbz2") and os.path.exists(os.path.abspath(x)):
15398                         print colorize("BAD", "\n*** emerging by path is broken and may not always work!!!\n")
15399                         break
15400
15401         root_config = trees[settings["ROOT"]]["root_config"]
15402         if myaction == "list-sets":
15403                 sys.stdout.write("".join("%s\n" % s for s in sorted(root_config.sets)))
15404                 sys.stdout.flush()
15405                 return os.EX_OK
15406
15407         # only expand sets for actions taking package arguments
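              # Illustrative example (hypothetical set): for `emerge --unmerge @mystuff`,
              # where "@mystuff" is a user-defined package set, expand_set_arguments()
              # below replaces the set token with its member atoms (SETPREFIX is the
              # "@" marker for set arguments).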
15408         oldargs = myfiles[:]
15409         if myaction in ("clean", "config", "depclean", "info", "prune", "unmerge", None):
15410                 myfiles, retval = expand_set_arguments(myfiles, myaction, root_config)
15411                 if retval != os.EX_OK:
15412                         return retval
15413
15414                 # Need to handle empty sets specially, otherwise emerge would react
15415                 # with the help message for an empty argument list.
15416                 if oldargs and not myfiles:
15417                         print "emerge: no targets left after set expansion"
15418                         return 0
15419
15420         if ("--tree" in myopts) and ("--columns" in myopts):
15421                 print "emerge: can't specify both \"--tree\" and \"--columns\"."
15422                 return 1
15423
15424         if ("--quiet" in myopts):
15425                 spinner.update = spinner.update_quiet
15426                 portage.util.noiselimit = -1
15427
15428         # Always create packages if FEATURES=buildpkg
15429         # Imply --buildpkg if --buildpkgonly
15430         if ("buildpkg" in settings.features) or ("--buildpkgonly" in myopts):
15431                 if "--buildpkg" not in myopts:
15432                         myopts["--buildpkg"] = True
15433
15434         # Always try to fetch binary packages if FEATURES=getbinpkg
15435         if ("getbinpkg" in settings.features):
15436                 myopts["--getbinpkg"] = True
15437
15438         if "--buildpkgonly" in myopts:
15439                 # --buildpkgonly will not merge anything, so
15440                 # it cancels all binary package options.
15441                 for opt in ("--getbinpkg", "--getbinpkgonly",
15442                         "--usepkg", "--usepkgonly"):
15443                         myopts.pop(opt, None)
15444
15445         if "--fetch-all-uri" in myopts:
15446                 myopts["--fetchonly"] = True
15447
15448         if "--skipfirst" in myopts and "--resume" not in myopts:
15449                 myopts["--resume"] = True
15450
15451         if ("--getbinpkgonly" in myopts) and not ("--usepkgonly" in myopts):
15452                 myopts["--usepkgonly"] = True
15453
15454         if ("--getbinpkgonly" in myopts) and not ("--getbinpkg" in myopts):
15455                 myopts["--getbinpkg"] = True
15456
15457         if ("--getbinpkg" in myopts) and not ("--usepkg" in myopts):
15458                 myopts["--usepkg"] = True
15459
15460         # Also allow -K to apply --usepkg/-k
15461         if ("--usepkgonly" in myopts) and not ("--usepkg" in myopts):
15462                 myopts["--usepkg"] = True
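              # Net effect of the implications above: --getbinpkgonly pulls in
              # --usepkgonly and --getbinpkg, --getbinpkg pulls in --usepkg, and
              # --usepkgonly pulls in --usepkg, so passing only --getbinpkgonly
              # ends up with all four binary-package options set.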
15463
15464         # Allow -p to remove --ask
15465         if ("--pretend" in myopts) and ("--ask" in myopts):
15466                 print ">>> --pretend disables --ask... removing --ask from options."
15467                 del myopts["--ask"]
15468
15469         # forbid --ask when not in a terminal
15470         # note: this breaks `emerge --ask | tee logfile`, but that doesn't work anyway.
15471         if ("--ask" in myopts) and (not sys.stdin.isatty()):
15472                 portage.writemsg("!!! \"--ask\" should only be used in a terminal. Exiting.\n",
15473                         noiselevel=-1)
15474                 return 1
15475
15476         if settings.get("PORTAGE_DEBUG", "") == "1":
15477                 spinner.update = spinner.update_quiet
15478                 portage.debug=1
15479                 if "python-trace" in settings.features:
15480                         import portage.debug
15481                         portage.debug.set_trace(True)
15482
15483         if "--quiet" not in myopts:
15484                 if not sys.stdout.isatty() or ("--nospinner" in myopts):
15485                         spinner.update = spinner.update_basic
15486
15487         if myaction == 'version':
15488                 print getportageversion(settings["PORTDIR"], settings["ROOT"],
15489                         settings.profile_path, settings["CHOST"],
15490                         trees[settings["ROOT"]]["vartree"].dbapi)
15491                 return 0
15492         elif "--help" in myopts:
15493                 _emerge.help.help(myaction, myopts, portage.output.havecolor)
15494                 return 0
15495
15496         if "--debug" in myopts:
15497                 print "myaction", myaction
15498                 print "myopts", myopts
15499
15500         if not myaction and not myfiles and "--resume" not in myopts:
15501                 _emerge.help.help(myaction, myopts, portage.output.havecolor)
15502                 return 1
15503
15504         pretend = "--pretend" in myopts
15505         fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
15506         buildpkgonly = "--buildpkgonly" in myopts
15507
15508         # Check that the current user has the privileges required by the requested action.
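              # portage.secpass (from portage.data) is conventionally 2 for root,
              # 1 for members of the portage group, and 0 otherwise; the checks
              # below decide whether superuser or only portage-group access is
              # needed for this invocation.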
15509         if portage.secpass < 2:
15510                 # We've already allowed "--version" and "--help" above.
15511                 if "--pretend" not in myopts and myaction not in ("search","info"):
15512                         need_superuser = not \
15513                                 (fetchonly or \
15514                                 (buildpkgonly and secpass >= 1) or \
15515                                 myaction in ("metadata", "regen") or \
15516                                 (myaction == "sync" and os.access(settings["PORTDIR"], os.W_OK)))
15517                         if portage.secpass < 1 or \
15518                                 need_superuser:
15519                                 if need_superuser:
15520                                         access_desc = "superuser"
15521                                 else:
15522                                         access_desc = "portage group"
15523                                 # Always show portage_group_warning() when only portage group
15524                                 # access is required but the user is not in the portage group.
15525                                 from portage.data import portage_group_warning
15526                                 if "--ask" in myopts:
15527                                         myopts["--pretend"] = True
15528                                         del myopts["--ask"]
15529                                         print ("%s access is required... " + \
15530                                                 "adding --pretend to options.\n") % access_desc
15531                                         if portage.secpass < 1 and not need_superuser:
15532                                                 portage_group_warning()
15533                                 else:
15534                                         sys.stderr.write(("emerge: %s access is " + \
15535                                                 "required.\n\n") % access_desc)
15536                                         if portage.secpass < 1 and not need_superuser:
15537                                                 portage_group_warning()
15538                                         return 1
15539
15540         disable_emergelog = False
15541         for x in ("--pretend", "--fetchonly", "--fetch-all-uri"):
15542                 if x in myopts:
15543                         disable_emergelog = True
15544                         break
15545         if myaction in ("search", "info"):
15546                 disable_emergelog = True
15547         if disable_emergelog:
15548                 # Disable emergelog for everything except build or unmerge
15549                 # operations.  This helps minimize parallel emerge.log entries that can
15550                 # confuse log parsers.  We especially want it disabled during
15551                 # parallel-fetch, which uses --resume --fetchonly.
15552                 global emergelog
15553                 def emergelog(*pargs, **kargs):
15554                         pass
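                      # With the `global` statement above, this no-op definition
                      # rebinds the module-level emergelog() name, so any later
                      # emergelog() calls during this run are silently discarded.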
15555
15556         if "--pretend" not in myopts:
15557                 emergelog(xterm_titles, "Started emerge on: "+\
15558                         time.strftime("%b %d, %Y %H:%M:%S", time.localtime()))
15559                 myelogstr = ""
15560                 if myopts:
15561                         myelogstr = " ".join(myopts)
15562                 if myaction:
15563                         myelogstr += " " + myaction
15564                 if myfiles:
15565                         myelogstr += " " + " ".join(oldargs)
15566                 emergelog(xterm_titles, " *** emerge " + myelogstr)
15567         del oldargs
15568
15569         def emergeexitsig(signum, frame):
15570                 signal.signal(signal.SIGINT, signal.SIG_IGN)
15571                 signal.signal(signal.SIGTERM, signal.SIG_IGN)
15572                 portage.util.writemsg("\n\nExiting on signal %(signal)s\n" % {"signal":signum})
15573                 sys.exit(100+signum)
15574         signal.signal(signal.SIGINT, emergeexitsig)
15575         signal.signal(signal.SIGTERM, emergeexitsig)
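              # The handler resets SIGINT/SIGTERM to SIG_IGN first so it cannot be
              # re-entered while shutting down, then exits with status 100+signum,
              # presumably to keep signal-driven exits distinguishable from ordinary
              # failure codes.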
15576
15577         def emergeexit():
15578                 """This gets our final log message in before we quit."""
15579                 if "--pretend" not in myopts:
15580                         emergelog(xterm_titles, " *** terminating.")
15581                 if "notitles" not in settings.features:
15582                         xtermTitleReset()
15583         portage.atexit_register(emergeexit)
15584
15585         if myaction in ("config", "metadata", "regen", "sync"):
15586                 if "--pretend" in myopts:
15587                         sys.stderr.write(("emerge: The '%s' action does " + \
15588                                 "not support '--pretend'.\n") % myaction)
15589                         return 1
15590
15591         if "sync" == myaction:
15592                 return action_sync(settings, trees, mtimedb, myopts, myaction)
15593         elif "metadata" == myaction:
15594                 action_metadata(settings, portdb, myopts)
15595         elif myaction=="regen":
15596                 validate_ebuild_environment(trees)
15597                 return action_regen(settings, portdb, myopts.get("--jobs"),
15598                         myopts.get("--load-average"))
15599         # CONFIG action
15600         elif "config"==myaction:
15601                 validate_ebuild_environment(trees)
15602                 action_config(settings, trees, myopts, myfiles)
15603
15604         # SEARCH action
15605         elif "search"==myaction:
15606                 validate_ebuild_environment(trees)
15607                 action_search(trees[settings["ROOT"]]["root_config"],
15608                         myopts, myfiles, spinner)
15609         elif myaction in ("clean", "unmerge") or \
15610                 (myaction == "prune" and "--nodeps" in myopts):
15611                 validate_ebuild_environment(trees)
15612
15613                 # Ensure atoms are valid before calling unmerge().
15614                 # For backward compat, leading '=' is not required.
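                      # Illustrative example: a versioned atom given as
                      # "app-editors/vim-7.2" is accepted here even though the
                      # strictly valid form is "=app-editors/vim-7.2"; the second
                      # is_valid_package_atom() call below covers that case by
                      # prepending "=".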
15615                 for x in myfiles:
15616                         if is_valid_package_atom(x) or \
15617                                 is_valid_package_atom("=" + x):
15618                                 continue
15619                         msg = []
15620                         msg.append("'%s' is not a valid package atom." % (x,))
15621                         msg.append("Please check ebuild(5) for full details.")
15622                         writemsg_level("".join("!!! %s\n" % line for line in msg),
15623                                 level=logging.ERROR, noiselevel=-1)
15624                         return 1
15625
15626                 # When given a list of atoms, unmerge
15627                 # them in the order given.
15628                 ordered = myaction == "unmerge"
15629                 if 1 == unmerge(root_config, myopts, myaction, myfiles,
15630                         mtimedb["ldpath"], ordered=ordered):
15631                         if not (buildpkgonly or fetchonly or pretend):
15632                                 post_emerge(root_config, myopts, mtimedb, os.EX_OK)
15633
15634         elif myaction in ("depclean", "info", "prune"):
15635
15636                 # Ensure atoms are valid before calling unmerge().
15637                 vardb = trees[settings["ROOT"]]["vartree"].dbapi
15638                 valid_atoms = []
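                      # dep_expand() turns a short name such as "pygtk" into a
                      # fully-qualified atom (illustratively "dev-python/pygtk")
                      # using the installed-package database; a name matching
                      # packages in more than one category raises
                      # AmbiguousPackageName, whose first argument (e[0] below)
                      # lists the candidate category/package names.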
15639                 for x in myfiles:
15640                         if is_valid_package_atom(x):
15641                                 try:
15642                                         valid_atoms.append(
15643                                                 portage.dep_expand(x, mydb=vardb, settings=settings))
15644                                 except portage.exception.AmbiguousPackageName, e:
15645                                         msg = "The short ebuild name \"" + x + \
15646                                                 "\" is ambiguous.  Please specify " + \
15647                                                 "one of the following " + \
15648                                                 "fully-qualified ebuild names instead:"
15649                                         for line in textwrap.wrap(msg, 70):
15650                                                 writemsg_level("!!! %s\n" % (line,),
15651                                                         level=logging.ERROR, noiselevel=-1)
15652                                         for i in e[0]:
15653                                                 writemsg_level("    %s\n" % colorize("INFORM", i),
15654                                                         level=logging.ERROR, noiselevel=-1)
15655                                         writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
15656                                         return 1
15657                                 continue
15658                         msg = []
15659                         msg.append("'%s' is not a valid package atom." % (x,))
15660                         msg.append("Please check ebuild(5) for full details.")
15661                         writemsg_level("".join("!!! %s\n" % line for line in msg),
15662                                 level=logging.ERROR, noiselevel=-1)
15663                         return 1
15664
15665                 if myaction == "info":
15666                         return action_info(settings, trees, myopts, valid_atoms)
15667
15668                 validate_ebuild_environment(trees)
15669                 action_depclean(settings, trees, mtimedb["ldpath"],
15670                         myopts, myaction, valid_atoms, spinner)
15671                 if not (buildpkgonly or fetchonly or pretend):
15672                         post_emerge(root_config, myopts, mtimedb, os.EX_OK)
15673         # "update", "system", or just process files:
15674         else:
15675                 validate_ebuild_environment(trees)
15676                 if "--pretend" not in myopts:
15677                         display_news_notification(root_config, myopts)
15678                 retval = action_build(settings, trees, mtimedb,
15679                         myopts, myaction, myfiles, spinner)
15680                 root_config = trees[settings["ROOT"]]["root_config"]
15681                 post_emerge(root_config, myopts, mtimedb, retval)
15682
15683                 return retval