Make dyn_clean ignore FEATURES=keepwork when [[ $EMERGE_FROM = binary ]]
[portage.git] / pym / _emerge / __init__.py
1 #!/usr/bin/python -O
2 # Copyright 1999-2006 Gentoo Foundation
3 # Distributed under the terms of the GNU General Public License v2
4 # $Id$
5
6 import array
7 from collections import deque
8 import fcntl
9 import formatter
10 import logging
11 import pwd
12 import select
13 import shlex
14 import shutil
15 import signal
16 import sys
17 import textwrap
18 import urlparse
19 import weakref
20 import gc
21 import os, stat
22 import platform
23
24 try:
25         import portage
26 except ImportError:
27         from os import path as osp
28         sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
29         import portage
30
31 from portage import digraph
32 from portage.const import NEWS_LIB_PATH
33
34 import _emerge.help
35 import portage.xpak, commands, errno, re, socket, time
36 from portage.output import blue, bold, colorize, darkblue, darkgreen, darkred, green, \
37         nc_len, red, teal, turquoise, xtermTitle, \
38         xtermTitleReset, yellow
39 from portage.output import create_color_func
40 good = create_color_func("GOOD")
41 bad = create_color_func("BAD")
42 # white looks bad on terminals with white background
43 from portage.output import bold as white
44
45 import portage.elog
46 import portage.dep
47 portage.dep._dep_check_strict = True
48 import portage.util
49 import portage.locks
50 import portage.exception
51 from portage.data import secpass
52 from portage.elog.messages import eerror
53 from portage.util import normalize_path as normpath
54 from portage.util import cmp_sort_key, writemsg, writemsg_level
55 from portage.sets import load_default_config, SETPREFIX
56 from portage.sets.base import InternalPackageSet
57
58 from itertools import chain, izip
59
60 try:
61         import cPickle as pickle
62 except ImportError:
63         import pickle
64
65 try:
66         from cStringIO import StringIO
67 except ImportError:
68         from StringIO import StringIO
69
70 class stdout_spinner(object):
71         scroll_msgs = [
72                 "Gentoo Rocks ("+platform.system()+")",
73                 "Thank you for using Gentoo. :)",
74                 "Are you actually trying to read this?",
75                 "How many times have you stared at this?",
76                 "We are generating the cache right now",
77                 "You are paying too much attention.",
78                 "A theory is better than its explanation.",
79                 "Phasers locked on target, Captain.",
80                 "Thrashing is just virtual crashing.",
81                 "To be is to program.",
82                 "Real Users hate Real Programmers.",
83                 "When all else fails, read the instructions.",
84                 "Functionality breeds Contempt.",
85                 "The future lies ahead.",
86                 "3.1415926535897932384626433832795028841971694",
87                 "Sometimes insanity is the only alternative.",
88                 "Inaccuracy saves a world of explanation.",
89         ]
90
91         twirl_sequence = "/-\\|/-\\|/-\\|/-\\|\\-/|\\-/|\\-/|\\-/|"
92
93         def __init__(self):
94                 self.spinpos = 0
95                 self.update = self.update_twirl
96                 self.scroll_sequence = self.scroll_msgs[
97                         int(time.time() * 100) % len(self.scroll_msgs)]
98                 self.last_update = 0
99                 self.min_display_latency = 0.05
100
101         def _return_early(self):
102                 """
103                 Flushing ouput to the tty too frequently wastes cpu time. Therefore,
104                 each update* method should return without doing any output when this
105                 method returns True.
106                 """
107                 cur_time = time.time()
108                 if cur_time - self.last_update < self.min_display_latency:
109                         return True
110                 self.last_update = cur_time
111                 return False
112
113         def update_basic(self):
114                 self.spinpos = (self.spinpos + 1) % 500
115                 if self._return_early():
116                         return
117                 if (self.spinpos % 100) == 0:
118                         if self.spinpos == 0:
119                                 sys.stdout.write(". ")
120                         else:
121                                 sys.stdout.write(".")
122                 sys.stdout.flush()
123
124         def update_scroll(self):
125                 if self._return_early():
126                         return
127                 if(self.spinpos >= len(self.scroll_sequence)):
128                         sys.stdout.write(darkgreen(" \b\b\b" + self.scroll_sequence[
129                                 len(self.scroll_sequence) - 1 - (self.spinpos % len(self.scroll_sequence))]))
130                 else:
131                         sys.stdout.write(green("\b " + self.scroll_sequence[self.spinpos]))
132                 sys.stdout.flush()
133                 self.spinpos = (self.spinpos + 1) % (2 * len(self.scroll_sequence))
134
135         def update_twirl(self):
136                 self.spinpos = (self.spinpos + 1) % len(self.twirl_sequence)
137                 if self._return_early():
138                         return
139                 sys.stdout.write("\b\b " + self.twirl_sequence[self.spinpos])
140                 sys.stdout.flush()
141
142         def update_quiet(self):
143                 return
144
145 def userquery(prompt, responses=None, colours=None):
146         """Displays a prompt and a set of responses, then waits for a response
147         which is checked against the responses and the first to match is
148         returned.  An empty response will match the first value in responses.  The
149         input buffer is *not* cleared prior to the prompt!
150
151         prompt: a String.
152         responses: a List of Strings.
153         colours: a List of Functions taking and returning a String, used to
154         process the responses for display. Typically these will be functions
155         like red() but could be e.g. lambda x: "DisplayString".
156         If responses is omitted, defaults to ["Yes", "No"], [green, red].
157         If only colours is omitted, defaults to [bold, ...].
158
159         Returns a member of the List responses. (If called without optional
160         arguments, returns "Yes" or "No".)
161         KeyboardInterrupt is converted to SystemExit to avoid tracebacks being
162         printed."""
163         if responses is None:
164                 responses = ["Yes", "No"]
165                 colours = [
166                         create_color_func("PROMPT_CHOICE_DEFAULT"),
167                         create_color_func("PROMPT_CHOICE_OTHER")
168                 ]
169         elif colours is None:
170                 colours=[bold]
171         colours=(colours*len(responses))[:len(responses)]
172         print bold(prompt),
173         try:
174                 while True:
175                         response=raw_input("["+"/".join([colours[i](responses[i]) for i in range(len(responses))])+"] ")
176                         for key in responses:
177                                 # An empty response will match the first value in responses.
178                                 if response.upper()==key[:len(response)].upper():
179                                         return key
180                         print "Sorry, response '%s' not understood." % response,
181         except (EOFError, KeyboardInterrupt):
182                 print "Interrupted."
183                 sys.exit(1)
184
185 actions = frozenset([
186 "clean", "config", "depclean",
187 "info", "list-sets", "metadata",
188 "prune", "regen",  "search",
189 "sync",  "unmerge", "version",
190 ])
191 options=[
192 "--ask",          "--alphabetical",
193 "--buildpkg",     "--buildpkgonly",
194 "--changelog",    "--columns",
195 "--complete-graph",
196 "--debug",        "--deep",
197 "--digest",
198 "--emptytree",
199 "--fetchonly",    "--fetch-all-uri",
200 "--getbinpkg",    "--getbinpkgonly",
201 "--help",         "--ignore-default-opts",
202 "--keep-going",
203 "--noconfmem",
204 "--newuse",       "--nocolor",
205 "--nodeps",       "--noreplace",
206 "--nospinner",    "--oneshot",
207 "--onlydeps",     "--pretend",
208 "--quiet",        "--resume",
209 "--searchdesc",   "--selective",
210 "--skipfirst",
211 "--tree",
212 "--update",
213 "--usepkg",       "--usepkgonly",
214 "--verbose",
215 ]
216
217 shortmapping={
218 "1":"--oneshot",
219 "a":"--ask",
220 "b":"--buildpkg",  "B":"--buildpkgonly",
221 "c":"--clean",     "C":"--unmerge",
222 "d":"--debug",     "D":"--deep",
223 "e":"--emptytree",
224 "f":"--fetchonly", "F":"--fetch-all-uri",
225 "g":"--getbinpkg", "G":"--getbinpkgonly",
226 "h":"--help",
227 "k":"--usepkg",    "K":"--usepkgonly",
228 "l":"--changelog",
229 "n":"--noreplace", "N":"--newuse",
230 "o":"--onlydeps",  "O":"--nodeps",
231 "p":"--pretend",   "P":"--prune",
232 "q":"--quiet",
233 "s":"--search",    "S":"--searchdesc",
234 "t":"--tree",
235 "u":"--update",
236 "v":"--verbose",   "V":"--version"
237 }
238
239 def emergelog(xterm_titles, mystr, short_msg=None):
240         if xterm_titles and short_msg:
241                 if "HOSTNAME" in os.environ:
242                         short_msg = os.environ["HOSTNAME"]+": "+short_msg
243                 xtermTitle(short_msg)
244         try:
245                 file_path = "/var/log/emerge.log"
246                 mylogfile = open(file_path, "a")
247                 portage.util.apply_secpass_permissions(file_path,
248                         uid=portage.portage_uid, gid=portage.portage_gid,
249                         mode=0660)
250                 mylock = None
251                 try:
252                         mylock = portage.locks.lockfile(mylogfile)
253                         # seek because we may have gotten held up by the lock.
254                         # if so, we may not be positioned at the end of the file.
255                         mylogfile.seek(0, 2)
256                         mylogfile.write(str(time.time())[:10]+": "+mystr+"\n")
257                         mylogfile.flush()
258                 finally:
259                         if mylock:
260                                 portage.locks.unlockfile(mylock)
261                         mylogfile.close()
262         except (IOError,OSError,portage.exception.PortageException), e:
263                 if secpass >= 1:
264                         print >> sys.stderr, "emergelog():",e
265
266 def countdown(secs=5, doing="Starting"):
267         if secs:
268                 print ">>> Waiting",secs,"seconds before starting..."
269                 print ">>> (Control-C to abort)...\n"+doing+" in: ",
270                 ticks=range(secs)
271                 ticks.reverse()
272                 for sec in ticks:
273                         sys.stdout.write(colorize("UNMERGE_WARN", str(sec+1)+" "))
274                         sys.stdout.flush()
275                         time.sleep(1)
276                 print
277
278 # formats a size given in bytes nicely
279 def format_size(mysize):
280         if isinstance(mysize, basestring):
281                 return mysize
282         if 0 != mysize % 1024:
283                 # Always round up to the next kB so that it doesn't show 0 kB when
284                 # some small file still needs to be fetched.
285                 mysize += 1024 - mysize % 1024
286         mystr=str(mysize/1024)
287         mycount=len(mystr)
288         while (mycount > 3):
289                 mycount-=3
290                 mystr=mystr[:mycount]+","+mystr[mycount:]
291         return mystr+" kB"
292
293
294 def getgccversion(chost):
295         """
296         rtype: C{str}
297         return:  the current in-use gcc version
298         """
299
300         gcc_ver_command = 'gcc -dumpversion'
301         gcc_ver_prefix = 'gcc-'
302
303         gcc_not_found_error = red(
304         "!!! No gcc found. You probably need to 'source /etc/profile'\n" +
305         "!!! to update the environment of this terminal and possibly\n" +
306         "!!! other terminals also.\n"
307         )
308
309         mystatus, myoutput = commands.getstatusoutput("gcc-config -c")
310         if mystatus == os.EX_OK and myoutput.startswith(chost + "-"):
311                 return myoutput.replace(chost + "-", gcc_ver_prefix, 1)
312
313         mystatus, myoutput = commands.getstatusoutput(
314                 chost + "-" + gcc_ver_command)
315         if mystatus == os.EX_OK:
316                 return gcc_ver_prefix + myoutput
317
318         mystatus, myoutput = commands.getstatusoutput(gcc_ver_command)
319         if mystatus == os.EX_OK:
320                 return gcc_ver_prefix + myoutput
321
322         portage.writemsg(gcc_not_found_error, noiselevel=-1)
323         return "[unavailable]"
324
325 def getportageversion(portdir, target_root, profile, chost, vardb):
326         profilever = "unavailable"
327         if profile:
328                 realpath = os.path.realpath(profile)
329                 basepath   = os.path.realpath(os.path.join(portdir, "profiles"))
330                 if realpath.startswith(basepath):
331                         profilever = realpath[1 + len(basepath):]
332                 else:
333                         try:
334                                 profilever = "!" + os.readlink(profile)
335                         except (OSError):
336                                 pass
337                 del realpath, basepath
338
339         libcver=[]
340         libclist  = vardb.match("virtual/libc")
341         libclist += vardb.match("virtual/glibc")
342         libclist  = portage.util.unique_array(libclist)
343         for x in libclist:
344                 xs=portage.catpkgsplit(x)
345                 if libcver:
346                         libcver+=","+"-".join(xs[1:])
347                 else:
348                         libcver="-".join(xs[1:])
349         if libcver==[]:
350                 libcver="unavailable"
351
352         gccver = getgccversion(chost)
353         unameout=platform.release()+" "+platform.machine()
354
355         return "Portage " + portage.VERSION +" ("+profilever+", "+gccver+", "+libcver+", "+unameout+")"
356
357 def create_depgraph_params(myopts, myaction):
358         #configure emerge engine parameters
359         #
360         # self:      include _this_ package regardless of if it is merged.
361         # selective: exclude the package if it is merged
362         # recurse:   go into the dependencies
363         # deep:      go into the dependencies of already merged packages
364         # empty:     pretend nothing is merged
365         # complete:  completely account for all known dependencies
366         # remove:    build graph for use in removing packages
367         myparams = set(["recurse"])
368
369         if myaction == "remove":
370                 myparams.add("remove")
371                 myparams.add("complete")
372                 return myparams
373
374         if "--update" in myopts or \
375                 "--newuse" in myopts or \
376                 "--reinstall" in myopts or \
377                 "--noreplace" in myopts:
378                 myparams.add("selective")
379         if "--emptytree" in myopts:
380                 myparams.add("empty")
381                 myparams.discard("selective")
382         if "--nodeps" in myopts:
383                 myparams.discard("recurse")
384         if "--deep" in myopts:
385                 myparams.add("deep")
386         if "--complete-graph" in myopts:
387                 myparams.add("complete")
388         return myparams
389
390 # search functionality
391 class search(object):
392
393         #
394         # class constants
395         #
396         VERSION_SHORT=1
397         VERSION_RELEASE=2
398
399         #
400         # public interface
401         #
402         def __init__(self, root_config, spinner, searchdesc,
403                 verbose, usepkg, usepkgonly):
404                 """Searches the available and installed packages for the supplied search key.
405                 The list of available and installed packages is created at object instantiation.
406                 This makes successive searches faster."""
407                 self.settings = root_config.settings
408                 self.vartree = root_config.trees["vartree"]
409                 self.spinner = spinner
410                 self.verbose = verbose
411                 self.searchdesc = searchdesc
412                 self.root_config = root_config
413                 self.setconfig = root_config.setconfig
414                 self.matches = {"pkg" : []}
415                 self.mlen = 0
416
417                 def fake_portdb():
418                         pass
419                 self.portdb = fake_portdb
420                 for attrib in ("aux_get", "cp_all",
421                         "xmatch", "findname", "getFetchMap"):
422                         setattr(fake_portdb, attrib, getattr(self, "_"+attrib))
423
424                 self._dbs = []
425
426                 portdb = root_config.trees["porttree"].dbapi
427                 bindb = root_config.trees["bintree"].dbapi
428                 vardb = root_config.trees["vartree"].dbapi
429
430                 if not usepkgonly and portdb._have_root_eclass_dir:
431                         self._dbs.append(portdb)
432
433                 if (usepkg or usepkgonly) and bindb.cp_all():
434                         self._dbs.append(bindb)
435
436                 self._dbs.append(vardb)
437                 self._portdb = portdb
438
439         def _cp_all(self):
440                 cp_all = set()
441                 for db in self._dbs:
442                         cp_all.update(db.cp_all())
443                 return list(sorted(cp_all))
444
445         def _aux_get(self, *args, **kwargs):
446                 for db in self._dbs:
447                         try:
448                                 return db.aux_get(*args, **kwargs)
449                         except KeyError:
450                                 pass
451                 raise
452
453         def _findname(self, *args, **kwargs):
454                 for db in self._dbs:
455                         if db is not self._portdb:
456                                 # We don't want findname to return anything
457                                 # unless it's an ebuild in a portage tree.
458                                 # Otherwise, it's already built and we don't
459                                 # care about it.
460                                 continue
461                         func = getattr(db, "findname", None)
462                         if func:
463                                 value = func(*args, **kwargs)
464                                 if value:
465                                         return value
466                 return None
467
468         def _getFetchMap(self, *args, **kwargs):
469                 for db in self._dbs:
470                         func = getattr(db, "getFetchMap", None)
471                         if func:
472                                 value = func(*args, **kwargs)
473                                 if value:
474                                         return value
475                 return {}
476
477         def _visible(self, db, cpv, metadata):
478                 installed = db is self.vartree.dbapi
479                 built = installed or db is not self._portdb
480                 pkg_type = "ebuild"
481                 if installed:
482                         pkg_type = "installed"
483                 elif built:
484                         pkg_type = "binary"
485                 return visible(self.settings,
486                         Package(type_name=pkg_type, root_config=self.root_config,
487                         cpv=cpv, built=built, installed=installed, metadata=metadata))
488
489         def _xmatch(self, level, atom):
490                 """
491                 This method does not expand old-style virtuals because it
492                 is restricted to returning matches for a single ${CATEGORY}/${PN}
493                 and old-style virual matches unreliable for that when querying
494                 multiple package databases. If necessary, old-style virtuals
495                 can be performed on atoms prior to calling this method.
496                 """
497                 cp = portage.dep_getkey(atom)
498                 if level == "match-all":
499                         matches = set()
500                         for db in self._dbs:
501                                 if hasattr(db, "xmatch"):
502                                         matches.update(db.xmatch(level, atom))
503                                 else:
504                                         matches.update(db.match(atom))
505                         result = list(x for x in matches if portage.cpv_getkey(x) == cp)
506                         db._cpv_sort_ascending(result)
507                 elif level == "match-visible":
508                         matches = set()
509                         for db in self._dbs:
510                                 if hasattr(db, "xmatch"):
511                                         matches.update(db.xmatch(level, atom))
512                                 else:
513                                         db_keys = list(db._aux_cache_keys)
514                                         for cpv in db.match(atom):
515                                                 metadata = izip(db_keys,
516                                                         db.aux_get(cpv, db_keys))
517                                                 if not self._visible(db, cpv, metadata):
518                                                         continue
519                                                 matches.add(cpv)
520                         result = list(x for x in matches if portage.cpv_getkey(x) == cp)
521                         db._cpv_sort_ascending(result)
522                 elif level == "bestmatch-visible":
523                         result = None
524                         for db in self._dbs:
525                                 if hasattr(db, "xmatch"):
526                                         cpv = db.xmatch("bestmatch-visible", atom)
527                                         if not cpv or portage.cpv_getkey(cpv) != cp:
528                                                 continue
529                                         if not result or cpv == portage.best([cpv, result]):
530                                                 result = cpv
531                                 else:
532                                         db_keys = Package.metadata_keys
533                                         # break out of this loop with highest visible
534                                         # match, checked in descending order
535                                         for cpv in reversed(db.match(atom)):
536                                                 if portage.cpv_getkey(cpv) != cp:
537                                                         continue
538                                                 metadata = izip(db_keys,
539                                                         db.aux_get(cpv, db_keys))
540                                                 if not self._visible(db, cpv, metadata):
541                                                         continue
542                                                 if not result or cpv == portage.best([cpv, result]):
543                                                         result = cpv
544                                                 break
545                 else:
546                         raise NotImplementedError(level)
547                 return result
548
549         def execute(self,searchkey):
550                 """Performs the search for the supplied search key"""
551                 match_category = 0
552                 self.searchkey=searchkey
553                 self.packagematches = []
554                 if self.searchdesc:
555                         self.searchdesc=1
556                         self.matches = {"pkg":[], "desc":[], "set":[]}
557                 else:
558                         self.searchdesc=0
559                         self.matches = {"pkg":[], "set":[]}
560                 print "Searching...   ",
561
562                 regexsearch = False
563                 if self.searchkey.startswith('%'):
564                         regexsearch = True
565                         self.searchkey = self.searchkey[1:]
566                 if self.searchkey.startswith('@'):
567                         match_category = 1
568                         self.searchkey = self.searchkey[1:]
569                 if regexsearch:
570                         self.searchre=re.compile(self.searchkey,re.I)
571                 else:
572                         self.searchre=re.compile(re.escape(self.searchkey), re.I)
573                 for package in self.portdb.cp_all():
574                         self.spinner.update()
575
576                         if match_category:
577                                 match_string  = package[:]
578                         else:
579                                 match_string  = package.split("/")[-1]
580
581                         masked=0
582                         if self.searchre.search(match_string):
583                                 if not self.portdb.xmatch("match-visible", package):
584                                         masked=1
585                                 self.matches["pkg"].append([package,masked])
586                         elif self.searchdesc: # DESCRIPTION searching
587                                 full_package = self.portdb.xmatch("bestmatch-visible", package)
588                                 if not full_package:
589                                         #no match found; we don't want to query description
590                                         full_package = portage.best(
591                                                 self.portdb.xmatch("match-all", package))
592                                         if not full_package:
593                                                 continue
594                                         else:
595                                                 masked=1
596                                 try:
597                                         full_desc = self.portdb.aux_get(
598                                                 full_package, ["DESCRIPTION"])[0]
599                                 except KeyError:
600                                         print "emerge: search: aux_get() failed, skipping"
601                                         continue
602                                 if self.searchre.search(full_desc):
603                                         self.matches["desc"].append([full_package,masked])
604
605                 self.sdict = self.setconfig.getSets()
606                 for setname in self.sdict:
607                         self.spinner.update()
608                         if match_category:
609                                 match_string = setname
610                         else:
611                                 match_string = setname.split("/")[-1]
612                         
613                         if self.searchre.search(match_string):
614                                 self.matches["set"].append([setname, False])
615                         elif self.searchdesc:
616                                 if self.searchre.search(
617                                         self.sdict[setname].getMetadata("DESCRIPTION")):
618                                         self.matches["set"].append([setname, False])
619                         
620                 self.mlen=0
621                 for mtype in self.matches:
622                         self.matches[mtype].sort()
623                         self.mlen += len(self.matches[mtype])
624
625         def addCP(self, cp):
626                 if not self.portdb.xmatch("match-all", cp):
627                         return
628                 masked = 0
629                 if not self.portdb.xmatch("bestmatch-visible", cp):
630                         masked = 1
631                 self.matches["pkg"].append([cp, masked])
632                 self.mlen += 1
633
634         def output(self):
635                 """Outputs the results of the search."""
636                 print "\b\b  \n[ Results for search key : "+white(self.searchkey)+" ]"
637                 print "[ Applications found : "+white(str(self.mlen))+" ]"
638                 print " "
639                 vardb = self.vartree.dbapi
640                 for mtype in self.matches:
641                         for match,masked in self.matches[mtype]:
642                                 full_package = None
643                                 if mtype == "pkg":
644                                         catpack = match
645                                         full_package = self.portdb.xmatch(
646                                                 "bestmatch-visible", match)
647                                         if not full_package:
648                                                 #no match found; we don't want to query description
649                                                 masked=1
650                                                 full_package = portage.best(
651                                                         self.portdb.xmatch("match-all",match))
652                                 elif mtype == "desc":
653                                         full_package = match
654                                         match        = portage.cpv_getkey(match)
655                                 elif mtype == "set":
656                                         print green("*")+"  "+white(match)
657                                         print "     ", darkgreen("Description:")+"  ", self.sdict[match].getMetadata("DESCRIPTION")
658                                         print
659                                 if full_package:
660                                         try:
661                                                 desc, homepage, license = self.portdb.aux_get(
662                                                         full_package, ["DESCRIPTION","HOMEPAGE","LICENSE"])
663                                         except KeyError:
664                                                 print "emerge: search: aux_get() failed, skipping"
665                                                 continue
666                                         if masked:
667                                                 print green("*")+"  "+white(match)+" "+red("[ Masked ]")
668                                         else:
669                                                 print green("*")+"  "+white(match)
670                                         myversion = self.getVersion(full_package, search.VERSION_RELEASE)
671
672                                         mysum = [0,0]
673                                         file_size_str = None
674                                         mycat = match.split("/")[0]
675                                         mypkg = match.split("/")[1]
676                                         mycpv = match + "-" + myversion
677                                         myebuild = self.portdb.findname(mycpv)
678                                         if myebuild:
679                                                 pkgdir = os.path.dirname(myebuild)
680                                                 from portage import manifest
681                                                 mf = manifest.Manifest(
682                                                         pkgdir, self.settings["DISTDIR"])
683                                                 try:
684                                                         uri_map = self.portdb.getFetchMap(mycpv)
685                                                 except portage.exception.InvalidDependString, e:
686                                                         file_size_str = "Unknown (%s)" % (e,)
687                                                         del e
688                                                 else:
689                                                         try:
690                                                                 mysum[0] = mf.getDistfilesSize(uri_map)
691                                                         except KeyError, e:
692                                                                 file_size_str = "Unknown (missing " + \
693                                                                         "digest for %s)" % (e,)
694                                                                 del e
695
696                                         available = False
697                                         for db in self._dbs:
698                                                 if db is not vardb and \
699                                                         db.cpv_exists(mycpv):
700                                                         available = True
701                                                         if not myebuild and hasattr(db, "bintree"):
702                                                                 myebuild = db.bintree.getname(mycpv)
703                                                                 try:
704                                                                         mysum[0] = os.stat(myebuild).st_size
705                                                                 except OSError:
706                                                                         myebuild = None
707                                                         break
708
709                                         if myebuild and file_size_str is None:
710                                                 mystr = str(mysum[0] / 1024)
711                                                 mycount = len(mystr)
712                                                 while (mycount > 3):
713                                                         mycount -= 3
714                                                         mystr = mystr[:mycount] + "," + mystr[mycount:]
715                                                 file_size_str = mystr + " kB"
716
717                                         if self.verbose:
718                                                 if available:
719                                                         print "     ", darkgreen("Latest version available:"),myversion
720                                                 print "     ", self.getInstallationStatus(mycat+'/'+mypkg)
721                                                 if myebuild:
722                                                         print "      %s %s" % \
723                                                                 (darkgreen("Size of files:"), file_size_str)
724                                                 print "     ", darkgreen("Homepage:")+"     ",homepage
725                                                 print "     ", darkgreen("Description:")+"  ",desc
726                                                 print "     ", darkgreen("License:")+"      ",license
727                                                 print
728         #
729         # private interface
730         #
731         def getInstallationStatus(self,package):
732                 installed_package = self.vartree.dep_bestmatch(package)
733                 result = ""
734                 version = self.getVersion(installed_package,search.VERSION_RELEASE)
735                 if len(version) > 0:
736                         result = darkgreen("Latest version installed:")+" "+version
737                 else:
738                         result = darkgreen("Latest version installed:")+" [ Not Installed ]"
739                 return result
740
741         def getVersion(self,full_package,detail):
742                 if len(full_package) > 1:
743                         package_parts = portage.catpkgsplit(full_package)
744                         if detail == search.VERSION_RELEASE and package_parts[3] != 'r0':
745                                 result = package_parts[2]+ "-" + package_parts[3]
746                         else:
747                                 result = package_parts[2]
748                 else:
749                         result = ""
750                 return result
751
752 class RootConfig(object):
753         """This is used internally by depgraph to track information about a
754         particular $ROOT."""
755
756         pkg_tree_map = {
757                 "ebuild"    : "porttree",
758                 "binary"    : "bintree",
759                 "installed" : "vartree"
760         }
761
762         tree_pkg_map = {}
763         for k, v in pkg_tree_map.iteritems():
764                 tree_pkg_map[v] = k
765
766         def __init__(self, settings, trees, setconfig):
767                 self.trees = trees
768                 self.settings = settings
769                 self.iuse_implicit = tuple(sorted(settings._get_implicit_iuse()))
770                 self.root = self.settings["ROOT"]
771                 self.setconfig = setconfig
772                 self.sets = self.setconfig.getSets()
773                 self.visible_pkgs = PackageVirtualDbapi(self.settings)
774
775 def create_world_atom(pkg, args_set, root_config):
776         """Create a new atom for the world file if one does not exist.  If the
777         argument atom is precise enough to identify a specific slot then a slot
778         atom will be returned. Atoms that are in the system set may also be stored
779         in world since system atoms can only match one slot while world atoms can
780         be greedy with respect to slots.  Unslotted system packages will not be
781         stored in world."""
782
783         arg_atom = args_set.findAtomForPackage(pkg)
784         if not arg_atom:
785                 return None
786         cp = portage.dep_getkey(arg_atom)
787         new_world_atom = cp
788         sets = root_config.sets
789         portdb = root_config.trees["porttree"].dbapi
790         vardb = root_config.trees["vartree"].dbapi
791         available_slots = set(portdb.aux_get(cpv, ["SLOT"])[0] \
792                 for cpv in portdb.match(cp))
793         slotted = len(available_slots) > 1 or \
794                 (len(available_slots) == 1 and "0" not in available_slots)
795         if not slotted:
796                 # check the vdb in case this is multislot
797                 available_slots = set(vardb.aux_get(cpv, ["SLOT"])[0] \
798                         for cpv in vardb.match(cp))
799                 slotted = len(available_slots) > 1 or \
800                         (len(available_slots) == 1 and "0" not in available_slots)
801         if slotted and arg_atom != cp:
802                 # If the user gave a specific atom, store it as a
803                 # slot atom in the world file.
804                 slot_atom = pkg.slot_atom
805
806                 # For USE=multislot, there are a couple of cases to
807                 # handle here:
808                 #
809                 # 1) SLOT="0", but the real SLOT spontaneously changed to some
810                 #    unknown value, so just record an unslotted atom.
811                 #
812                 # 2) SLOT comes from an installed package and there is no
813                 #    matching SLOT in the portage tree.
814                 #
815                 # Make sure that the slot atom is available in either the
816                 # portdb or the vardb, since otherwise the user certainly
817                 # doesn't want the SLOT atom recorded in the world file
818                 # (case 1 above).  If it's only available in the vardb,
819                 # the user may be trying to prevent a USE=multislot
820                 # package from being removed by --depclean (case 2 above).
821
822                 mydb = portdb
823                 if not portdb.match(slot_atom):
824                         # SLOT seems to come from an installed multislot package
825                         mydb = vardb
826                 # If there is no installed package matching the SLOT atom,
827                 # it probably changed SLOT spontaneously due to USE=multislot,
828                 # so just record an unslotted atom.
829                 if vardb.match(slot_atom):
830                         # Now verify that the argument is precise
831                         # enough to identify a specific slot.
832                         matches = mydb.match(arg_atom)
833                         matched_slots = set()
834                         for cpv in matches:
835                                 matched_slots.add(mydb.aux_get(cpv, ["SLOT"])[0])
836                         if len(matched_slots) == 1:
837                                 new_world_atom = slot_atom
838
839         if new_world_atom == sets["world"].findAtomForPackage(pkg):
840                 # Both atoms would be identical, so there's nothing to add.
841                 return None
842         if not slotted:
843                 # Unlike world atoms, system atoms are not greedy for slots, so they
844                 # can't be safely excluded from world if they are slotted.
845                 system_atom = sets["system"].findAtomForPackage(pkg)
846                 if system_atom:
847                         if not portage.dep_getkey(system_atom).startswith("virtual/"):
848                                 return None
849                         # System virtuals aren't safe to exclude from world since they can
850                         # match multiple old-style virtuals but only one of them will be
851                         # pulled in by update or depclean.
852                         providers = portdb.mysettings.getvirtuals().get(
853                                 portage.dep_getkey(system_atom))
854                         if providers and len(providers) == 1 and providers[0] == cp:
855                                 return None
856         return new_world_atom
857
858 def filter_iuse_defaults(iuse):
859         for flag in iuse:
860                 if flag.startswith("+") or flag.startswith("-"):
861                         yield flag[1:]
862                 else:
863                         yield flag
864
865 class SlotObject(object):
866         __slots__ = ("__weakref__",)
867
868         def __init__(self, **kwargs):
869                 classes = [self.__class__]
870                 while classes:
871                         c = classes.pop()
872                         if c is SlotObject:
873                                 continue
874                         classes.extend(c.__bases__)
875                         slots = getattr(c, "__slots__", None)
876                         if not slots:
877                                 continue
878                         for myattr in slots:
879                                 myvalue = kwargs.get(myattr, None)
880                                 setattr(self, myattr, myvalue)
881
882         def copy(self):
883                 """
884                 Create a new instance and copy all attributes
885                 defined from __slots__ (including those from
886                 inherited classes).
887                 """
888                 obj = self.__class__()
889
890                 classes = [self.__class__]
891                 while classes:
892                         c = classes.pop()
893                         if c is SlotObject:
894                                 continue
895                         classes.extend(c.__bases__)
896                         slots = getattr(c, "__slots__", None)
897                         if not slots:
898                                 continue
899                         for myattr in slots:
900                                 setattr(obj, myattr, getattr(self, myattr))
901
902                 return obj
903
904 class AbstractDepPriority(SlotObject):
905         __slots__ = ("buildtime", "runtime", "runtime_post")
906
907         def __lt__(self, other):
908                 return self.__int__() < other
909
910         def __le__(self, other):
911                 return self.__int__() <= other
912
913         def __eq__(self, other):
914                 return self.__int__() == other
915
916         def __ne__(self, other):
917                 return self.__int__() != other
918
919         def __gt__(self, other):
920                 return self.__int__() > other
921
922         def __ge__(self, other):
923                 return self.__int__() >= other
924
925         def copy(self):
926                 import copy
927                 return copy.copy(self)
928
929 class DepPriority(AbstractDepPriority):
930
931         __slots__ = ("satisfied", "optional", "rebuild")
932
933         def __int__(self):
934                 return 0
935
936         def __str__(self):
937                 if self.optional:
938                         return "optional"
939                 if self.buildtime:
940                         return "buildtime"
941                 if self.runtime:
942                         return "runtime"
943                 if self.runtime_post:
944                         return "runtime_post"
945                 return "soft"
946
947 class BlockerDepPriority(DepPriority):
948         __slots__ = ()
949         def __int__(self):
950                 return 0
951
952         def __str__(self):
953                 return 'blocker'
954
955 BlockerDepPriority.instance = BlockerDepPriority()
956
957 class UnmergeDepPriority(AbstractDepPriority):
958         __slots__ = ("optional", "satisfied",)
959         """
960         Combination of properties           Priority  Category
961
962         runtime                                0       HARD
963         runtime_post                          -1       HARD
964         buildtime                             -2       SOFT
965         (none of the above)                   -2       SOFT
966         """
967
968         MAX    =  0
969         SOFT   = -2
970         MIN    = -2
971
972         def __int__(self):
973                 if self.runtime:
974                         return 0
975                 if self.runtime_post:
976                         return -1
977                 if self.buildtime:
978                         return -2
979                 return -2
980
981         def __str__(self):
982                 myvalue = self.__int__()
983                 if myvalue > self.SOFT:
984                         return "hard"
985                 return "soft"
986
987 class DepPriorityNormalRange(object):
988         """
989         DepPriority properties              Index      Category
990
991         buildtime                                      HARD
992         runtime                                3       MEDIUM
993         runtime_post                           2       MEDIUM_SOFT
994         optional                               1       SOFT
995         (none of the above)                    0       NONE
996         """
997         MEDIUM      = 3
998         MEDIUM_SOFT = 2
999         SOFT        = 1
1000         NONE        = 0
1001
1002         @classmethod
1003         def _ignore_optional(cls, priority):
1004                 if priority.__class__ is not DepPriority:
1005                         return False
1006                 return bool(priority.optional)
1007
1008         @classmethod
1009         def _ignore_runtime_post(cls, priority):
1010                 if priority.__class__ is not DepPriority:
1011                         return False
1012                 return bool(priority.optional or priority.runtime_post)
1013
1014         @classmethod
1015         def _ignore_runtime(cls, priority):
1016                 if priority.__class__ is not DepPriority:
1017                         return False
1018                 return not priority.buildtime
1019
1020         ignore_medium      = _ignore_runtime
1021         ignore_medium_soft = _ignore_runtime_post
1022         ignore_soft        = _ignore_optional
1023
1024 DepPriorityNormalRange.ignore_priority = (
1025         None,
1026         DepPriorityNormalRange._ignore_optional,
1027         DepPriorityNormalRange._ignore_runtime_post,
1028         DepPriorityNormalRange._ignore_runtime
1029 )
1030
1031 class DepPrioritySatisfiedRange(object):
1032         """
1033         DepPriority                         Index      Category
1034
1035         not satisfied and buildtime                    HARD
1036         not satisfied and runtime              7       MEDIUM
1037         not satisfied and runtime_post         6       MEDIUM_SOFT
1038         satisfied and buildtime and rebuild    5       SOFT
1039         satisfied and buildtime                4       SOFT
1040         satisfied and runtime                  3       SOFT
1041         satisfied and runtime_post             2       SOFT
1042         optional                               1       SOFT
1043         (none of the above)                    0       NONE
1044         """
1045         MEDIUM      = 7
1046         MEDIUM_SOFT = 6
1047         SOFT        = 5
1048         NONE        = 0
1049
1050         @classmethod
1051         def _ignore_optional(cls, priority):
1052                 if priority.__class__ is not DepPriority:
1053                         return False
1054                 return bool(priority.optional)
1055
1056         @classmethod
1057         def _ignore_satisfied_runtime_post(cls, priority):
1058                 if priority.__class__ is not DepPriority:
1059                         return False
1060                 if priority.optional:
1061                         return True
1062                 if not priority.satisfied:
1063                         return False
1064                 return bool(priority.runtime_post)
1065
1066         @classmethod
1067         def _ignore_satisfied_runtime(cls, priority):
1068                 if priority.__class__ is not DepPriority:
1069                         return False
1070                 if priority.optional:
1071                         return True
1072                 if not priority.satisfied:
1073                         return False
1074                 return not priority.buildtime
1075
1076         @classmethod
1077         def _ignore_satisfied_buildtime(cls, priority):
1078                 if priority.__class__ is not DepPriority:
1079                         return False
1080                 if priority.optional:
1081                         return True
1082                 if not priority.satisfied:
1083                         return False
1084                 if priority.buildtime:
1085                         return not priority.rebuild
1086                 return True
1087
1088         @classmethod
1089         def _ignore_satisfied_buildtime_rebuild(cls, priority):
1090                 if priority.__class__ is not DepPriority:
1091                         return False
1092                 if priority.optional:
1093                         return True
1094                 return bool(priority.satisfied)
1095
1096         @classmethod
1097         def _ignore_runtime_post(cls, priority):
1098                 if priority.__class__ is not DepPriority:
1099                         return False
1100                 return bool(priority.optional or \
1101                         priority.satisfied or \
1102                         priority.runtime_post)
1103
1104         @classmethod
1105         def _ignore_runtime(cls, priority):
1106                 if priority.__class__ is not DepPriority:
1107                         return False
1108                 return bool(priority.satisfied or \
1109                         not priority.buildtime)
1110
1111         ignore_medium      = _ignore_runtime
1112         ignore_medium_soft = _ignore_runtime_post
1113         ignore_soft        = _ignore_satisfied_buildtime_rebuild
1114
1115 DepPrioritySatisfiedRange.ignore_priority = (
1116         None,
1117         DepPrioritySatisfiedRange._ignore_optional,
1118         DepPrioritySatisfiedRange._ignore_satisfied_runtime_post,
1119         DepPrioritySatisfiedRange._ignore_satisfied_runtime,
1120         DepPrioritySatisfiedRange._ignore_satisfied_buildtime,
1121         DepPrioritySatisfiedRange._ignore_satisfied_buildtime_rebuild,
1122         DepPrioritySatisfiedRange._ignore_runtime_post,
1123         DepPrioritySatisfiedRange._ignore_runtime
1124 )
1125
1126 def _find_deep_system_runtime_deps(graph):
1127         deep_system_deps = set()
1128         node_stack = []
1129         for node in graph:
1130                 if not isinstance(node, Package) or \
1131                         node.operation == 'uninstall':
1132                         continue
1133                 if node.root_config.sets['system'].findAtomForPackage(node):
1134                         node_stack.append(node)
1135
1136         def ignore_priority(priority):
1137                 """
1138                 Ignore non-runtime priorities.
1139                 """
1140                 if isinstance(priority, DepPriority) and \
1141                         (priority.runtime or priority.runtime_post):
1142                         return False
1143                 return True
1144
1145         while node_stack:
1146                 node = node_stack.pop()
1147                 if node in deep_system_deps:
1148                         continue
1149                 deep_system_deps.add(node)
1150                 for child in graph.child_nodes(node, ignore_priority=ignore_priority):
1151                         if not isinstance(child, Package) or \
1152                                 child.operation == 'uninstall':
1153                                 continue
1154                         node_stack.append(child)
1155
1156         return deep_system_deps
1157
1158 class FakeVartree(portage.vartree):
1159         """This is implements an in-memory copy of a vartree instance that provides
1160         all the interfaces required for use by the depgraph.  The vardb is locked
1161         during the constructor call just long enough to read a copy of the
1162         installed package information.  This allows the depgraph to do it's
1163         dependency calculations without holding a lock on the vardb.  It also
1164         allows things like vardb global updates to be done in memory so that the
1165         user doesn't necessarily need write access to the vardb in cases where
1166         global updates are necessary (updates are performed when necessary if there
1167         is not a matching ebuild in the tree)."""
1168         def __init__(self, root_config, pkg_cache=None, acquire_lock=1):
1169                 self._root_config = root_config
1170                 if pkg_cache is None:
1171                         pkg_cache = {}
1172                 real_vartree = root_config.trees["vartree"]
1173                 portdb = root_config.trees["porttree"].dbapi
1174                 self.root = real_vartree.root
1175                 self.settings = real_vartree.settings
1176                 mykeys = list(real_vartree.dbapi._aux_cache_keys)
1177                 if "_mtime_" not in mykeys:
1178                         mykeys.append("_mtime_")
1179                 self._db_keys = mykeys
1180                 self._pkg_cache = pkg_cache
1181                 self.dbapi = PackageVirtualDbapi(real_vartree.settings)
1182                 vdb_path = os.path.join(self.root, portage.VDB_PATH)
1183                 try:
1184                         # At least the parent needs to exist for the lock file.
1185                         portage.util.ensure_dirs(vdb_path)
1186                 except portage.exception.PortageException:
1187                         pass
1188                 vdb_lock = None
1189                 try:
1190                         if acquire_lock and os.access(vdb_path, os.W_OK):
1191                                 vdb_lock = portage.locks.lockdir(vdb_path)
1192                         real_dbapi = real_vartree.dbapi
1193                         slot_counters = {}
1194                         for cpv in real_dbapi.cpv_all():
1195                                 cache_key = ("installed", self.root, cpv, "nomerge")
1196                                 pkg = self._pkg_cache.get(cache_key)
1197                                 if pkg is not None:
1198                                         metadata = pkg.metadata
1199                                 else:
1200                                         metadata = dict(izip(mykeys, real_dbapi.aux_get(cpv, mykeys)))
1201                                 myslot = metadata["SLOT"]
1202                                 mycp = portage.dep_getkey(cpv)
1203                                 myslot_atom = "%s:%s" % (mycp, myslot)
1204                                 try:
1205                                         mycounter = long(metadata["COUNTER"])
1206                                 except ValueError:
1207                                         mycounter = 0
1208                                         metadata["COUNTER"] = str(mycounter)
1209                                 other_counter = slot_counters.get(myslot_atom, None)
1210                                 if other_counter is not None:
1211                                         if other_counter > mycounter:
1212                                                 continue
1213                                 slot_counters[myslot_atom] = mycounter
1214                                 if pkg is None:
1215                                         pkg = Package(built=True, cpv=cpv,
1216                                                 installed=True, metadata=metadata,
1217                                                 root_config=root_config, type_name="installed")
1218                                 self._pkg_cache[pkg] = pkg
1219                                 self.dbapi.cpv_inject(pkg)
1220                         real_dbapi.flush_cache()
1221                 finally:
1222                         if vdb_lock:
1223                                 portage.locks.unlockdir(vdb_lock)
1224                 # Populate the old-style virtuals using the cached values.
1225                 if not self.settings.treeVirtuals:
1226                         self.settings.treeVirtuals = portage.util.map_dictlist_vals(
1227                                 portage.getCPFromCPV, self.get_all_provides())
1228
1229                 # Intialize variables needed for lazy cache pulls of the live ebuild
1230                 # metadata.  This ensures that the vardb lock is released ASAP, without
1231                 # being delayed in case cache generation is triggered.
1232                 self._aux_get = self.dbapi.aux_get
1233                 self.dbapi.aux_get = self._aux_get_wrapper
1234                 self._match = self.dbapi.match
1235                 self.dbapi.match = self._match_wrapper
1236                 self._aux_get_history = set()
1237                 self._portdb_keys = ["EAPI", "DEPEND", "RDEPEND", "PDEPEND"]
1238                 self._portdb = portdb
1239                 self._global_updates = None
1240
1241         def _match_wrapper(self, cpv, use_cache=1):
1242                 """
1243                 Make sure the metadata in Package instances gets updated for any
1244                 cpv that is returned from a match() call, since the metadata can
1245                 be accessed directly from the Package instance instead of via
1246                 aux_get().
1247                 """
1248                 matches = self._match(cpv, use_cache=use_cache)
1249                 for cpv in matches:
1250                         if cpv in self._aux_get_history:
1251                                 continue
1252                         self._aux_get_wrapper(cpv, [])
1253                 return matches
1254
1255         def _aux_get_wrapper(self, pkg, wants):
1256                 if pkg in self._aux_get_history:
1257                         return self._aux_get(pkg, wants)
1258                 self._aux_get_history.add(pkg)
1259                 try:
1260                         # Use the live ebuild metadata if possible.
1261                         live_metadata = dict(izip(self._portdb_keys,
1262                                 self._portdb.aux_get(pkg, self._portdb_keys)))
1263                         if not portage.eapi_is_supported(live_metadata["EAPI"]):
1264                                 raise KeyError(pkg)
1265                         self.dbapi.aux_update(pkg, live_metadata)
1266                 except (KeyError, portage.exception.PortageException):
1267                         if self._global_updates is None:
1268                                 self._global_updates = \
1269                                         grab_global_updates(self._portdb.porttree_root)
1270                         perform_global_updates(
1271                                 pkg, self.dbapi, self._global_updates)
1272                 return self._aux_get(pkg, wants)
1273
1274         def sync(self, acquire_lock=1):
1275                 """
1276                 Call this method to synchronize state with the real vardb
1277                 after one or more packages may have been installed or
1278                 uninstalled.
1279                 """
1280                 vdb_path = os.path.join(self.root, portage.VDB_PATH)
1281                 try:
1282                         # At least the parent needs to exist for the lock file.
1283                         portage.util.ensure_dirs(vdb_path)
1284                 except portage.exception.PortageException:
1285                         pass
1286                 vdb_lock = None
1287                 try:
1288                         if acquire_lock and os.access(vdb_path, os.W_OK):
1289                                 vdb_lock = portage.locks.lockdir(vdb_path)
1290                         self._sync()
1291                 finally:
1292                         if vdb_lock:
1293                                 portage.locks.unlockdir(vdb_lock)
1294
1295         def _sync(self):
1296
1297                 real_vardb = self._root_config.trees["vartree"].dbapi
1298                 current_cpv_set = frozenset(real_vardb.cpv_all())
1299                 pkg_vardb = self.dbapi
1300                 aux_get_history = self._aux_get_history
1301
1302                 # Remove any packages that have been uninstalled.
1303                 for pkg in list(pkg_vardb):
1304                         if pkg.cpv not in current_cpv_set:
1305                                 pkg_vardb.cpv_remove(pkg)
1306                                 aux_get_history.discard(pkg.cpv)
1307
1308                 # Validate counters and timestamps.
1309                 slot_counters = {}
1310                 root = self.root
1311                 validation_keys = ["COUNTER", "_mtime_"]
1312                 for cpv in current_cpv_set:
1313
1314                         pkg_hash_key = ("installed", root, cpv, "nomerge")
1315                         pkg = pkg_vardb.get(pkg_hash_key)
1316                         if pkg is not None:
1317                                 counter, mtime = real_vardb.aux_get(cpv, validation_keys)
1318                                 try:
1319                                         counter = long(counter)
1320                                 except ValueError:
1321                                         counter = 0
1322
1323                                 if counter != pkg.counter or \
1324                                         mtime != pkg.mtime:
1325                                         pkg_vardb.cpv_remove(pkg)
1326                                         aux_get_history.discard(pkg.cpv)
1327                                         pkg = None
1328
1329                         if pkg is None:
1330                                 pkg = self._pkg(cpv)
1331
1332                         other_counter = slot_counters.get(pkg.slot_atom)
1333                         if other_counter is not None:
1334                                 if other_counter > pkg.counter:
1335                                         continue
1336
1337                         slot_counters[pkg.slot_atom] = pkg.counter
1338                         pkg_vardb.cpv_inject(pkg)
1339
1340                 real_vardb.flush_cache()
1341
1342         def _pkg(self, cpv):
1343                 root_config = self._root_config
1344                 real_vardb = root_config.trees["vartree"].dbapi
1345                 pkg = Package(cpv=cpv, installed=True,
1346                         metadata=izip(self._db_keys,
1347                         real_vardb.aux_get(cpv, self._db_keys)),
1348                         root_config=root_config,
1349                         type_name="installed")
1350
1351                 try:
1352                         mycounter = long(pkg.metadata["COUNTER"])
1353                 except ValueError:
1354                         mycounter = 0
1355                         pkg.metadata["COUNTER"] = str(mycounter)
1356
1357                 return pkg
1358
1359 def grab_global_updates(portdir):
1360         from portage.update import grab_updates, parse_updates
1361         updpath = os.path.join(portdir, "profiles", "updates")
1362         try:
1363                 rawupdates = grab_updates(updpath)
1364         except portage.exception.DirectoryNotFound:
1365                 rawupdates = []
1366         upd_commands = []
1367         for mykey, mystat, mycontent in rawupdates:
1368                 commands, errors = parse_updates(mycontent)
1369                 upd_commands.extend(commands)
1370         return upd_commands
1371
1372 def perform_global_updates(mycpv, mydb, mycommands):
1373         from portage.update import update_dbentries
1374         aux_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
1375         aux_dict = dict(izip(aux_keys, mydb.aux_get(mycpv, aux_keys)))
1376         updates = update_dbentries(mycommands, aux_dict)
1377         if updates:
1378                 mydb.aux_update(mycpv, updates)
1379
1380 def visible(pkgsettings, pkg):
1381         """
1382         Check if a package is visible. This can raise an InvalidDependString
1383         exception if LICENSE is invalid.
1384         TODO: optionally generate a list of masking reasons
1385         @rtype: Boolean
1386         @returns: True if the package is visible, False otherwise.
1387         """
1388         if not pkg.metadata["SLOT"]:
1389                 return False
1390         if not pkg.installed:
1391                 if not pkgsettings._accept_chost(pkg.cpv, pkg.metadata):
1392                         return False
1393         eapi = pkg.metadata["EAPI"]
1394         if not portage.eapi_is_supported(eapi):
1395                 return False
1396         if not pkg.installed:
1397                 if portage._eapi_is_deprecated(eapi):
1398                         return False
1399                 if pkgsettings._getMissingKeywords(pkg.cpv, pkg.metadata):
1400                         return False
1401         if pkgsettings._getMaskAtom(pkg.cpv, pkg.metadata):
1402                 return False
1403         if pkgsettings._getProfileMaskAtom(pkg.cpv, pkg.metadata):
1404                 return False
1405         try:
1406                 if pkgsettings._getMissingLicenses(pkg.cpv, pkg.metadata):
1407                         return False
1408         except portage.exception.InvalidDependString:
1409                 return False
1410         return True
1411
1412 def get_masking_status(pkg, pkgsettings, root_config):
1413
1414         mreasons = portage.getmaskingstatus(
1415                 pkg, settings=pkgsettings,
1416                 portdb=root_config.trees["porttree"].dbapi)
1417
1418         if not pkg.installed:
1419                 if not pkgsettings._accept_chost(pkg.cpv, pkg.metadata):
1420                         mreasons.append("CHOST: %s" % \
1421                                 pkg.metadata["CHOST"])
1422
1423         if not pkg.metadata["SLOT"]:
1424                 mreasons.append("invalid: SLOT is undefined")
1425
1426         return mreasons
1427
1428 def get_mask_info(root_config, cpv, pkgsettings,
1429         db, pkg_type, built, installed, db_keys):
1430         eapi_masked = False
1431         try:
1432                 metadata = dict(izip(db_keys,
1433                         db.aux_get(cpv, db_keys)))
1434         except KeyError:
1435                 metadata = None
1436         if metadata and not built:
1437                 pkgsettings.setcpv(cpv, mydb=metadata)
1438                 metadata["USE"] = pkgsettings["PORTAGE_USE"]
1439                 metadata['CHOST'] = pkgsettings.get('CHOST', '')
1440         if metadata is None:
1441                 mreasons = ["corruption"]
1442         else:
1443                 pkg = Package(type_name=pkg_type, root_config=root_config,
1444                         cpv=cpv, built=built, installed=installed, metadata=metadata)
1445                 mreasons = get_masking_status(pkg, pkgsettings, root_config)
1446         return metadata, mreasons
1447
1448 def show_masked_packages(masked_packages):
1449         shown_licenses = set()
1450         shown_comments = set()
1451         # Maybe there is both an ebuild and a binary. Only
1452         # show one of them to avoid redundant appearance.
1453         shown_cpvs = set()
1454         have_eapi_mask = False
1455         for (root_config, pkgsettings, cpv,
1456                 metadata, mreasons) in masked_packages:
1457                 if cpv in shown_cpvs:
1458                         continue
1459                 shown_cpvs.add(cpv)
1460                 comment, filename = None, None
1461                 if "package.mask" in mreasons:
1462                         comment, filename = \
1463                                 portage.getmaskingreason(
1464                                 cpv, metadata=metadata,
1465                                 settings=pkgsettings,
1466                                 portdb=root_config.trees["porttree"].dbapi,
1467                                 return_location=True)
1468                 missing_licenses = []
1469                 if metadata:
1470                         if not portage.eapi_is_supported(metadata["EAPI"]):
1471                                 have_eapi_mask = True
1472                         try:
1473                                 missing_licenses = \
1474                                         pkgsettings._getMissingLicenses(
1475                                                 cpv, metadata)
1476                         except portage.exception.InvalidDependString:
1477                                 # This will have already been reported
1478                                 # above via mreasons.
1479                                 pass
1480
1481                 print "- "+cpv+" (masked by: "+", ".join(mreasons)+")"
1482                 if comment and comment not in shown_comments:
1483                         print filename+":"
1484                         print comment
1485                         shown_comments.add(comment)
1486                 portdb = root_config.trees["porttree"].dbapi
1487                 for l in missing_licenses:
1488                         l_path = portdb.findLicensePath(l)
1489                         if l in shown_licenses:
1490                                 continue
1491                         msg = ("A copy of the '%s' license" + \
1492                         " is located at '%s'.") % (l, l_path)
1493                         print msg
1494                         print
1495                         shown_licenses.add(l)
1496         return have_eapi_mask
1497
1498 class Task(SlotObject):
1499         __slots__ = ("_hash_key", "_hash_value")
1500
1501         def _get_hash_key(self):
1502                 hash_key = getattr(self, "_hash_key", None)
1503                 if hash_key is None:
1504                         raise NotImplementedError(self)
1505                 return hash_key
1506
1507         def __eq__(self, other):
1508                 return self._get_hash_key() == other
1509
1510         def __ne__(self, other):
1511                 return self._get_hash_key() != other
1512
1513         def __hash__(self):
1514                 hash_value = getattr(self, "_hash_value", None)
1515                 if hash_value is None:
1516                         self._hash_value = hash(self._get_hash_key())
1517                 return self._hash_value
1518
1519         def __len__(self):
1520                 return len(self._get_hash_key())
1521
1522         def __getitem__(self, key):
1523                 return self._get_hash_key()[key]
1524
1525         def __iter__(self):
1526                 return iter(self._get_hash_key())
1527
1528         def __contains__(self, key):
1529                 return key in self._get_hash_key()
1530
1531         def __str__(self):
1532                 return str(self._get_hash_key())
1533
1534 class Blocker(Task):
1535
1536         __hash__ = Task.__hash__
1537         __slots__ = ("root", "atom", "cp", "eapi", "satisfied")
1538
1539         def __init__(self, **kwargs):
1540                 Task.__init__(self, **kwargs)
1541                 self.cp = portage.dep_getkey(self.atom)
1542
1543         def _get_hash_key(self):
1544                 hash_key = getattr(self, "_hash_key", None)
1545                 if hash_key is None:
1546                         self._hash_key = \
1547                                 ("blocks", self.root, self.atom, self.eapi)
1548                 return self._hash_key
1549
1550 class Package(Task):
1551
1552         __hash__ = Task.__hash__
1553         __slots__ = ("built", "cpv", "depth",
1554                 "installed", "metadata", "onlydeps", "operation",
1555                 "root_config", "type_name",
1556                 "category", "counter", "cp", "cpv_split",
1557                 "inherited", "iuse", "mtime",
1558                 "pf", "pv_split", "root", "slot", "slot_atom", "use")
1559
1560         metadata_keys = [
1561                 "CHOST", "COUNTER", "DEPEND", "EAPI",
1562                 "INHERITED", "IUSE", "KEYWORDS",
1563                 "LICENSE", "PDEPEND", "PROVIDE", "RDEPEND",
1564                 "repository", "PROPERTIES", "RESTRICT", "SLOT", "USE", "_mtime_"]
1565
1566         def __init__(self, **kwargs):
1567                 Task.__init__(self, **kwargs)
1568                 self.root = self.root_config.root
1569                 self.metadata = _PackageMetadataWrapper(self, self.metadata)
1570                 self.cp = portage.cpv_getkey(self.cpv)
1571                 slot = self.slot
1572                 if not slot:
1573                         # Avoid an InvalidAtom exception when creating slot_atom.
1574                         # This package instance will be masked due to empty SLOT.
1575                         slot = '0'
1576                 self.slot_atom = portage.dep.Atom("%s:%s" % (self.cp, slot))
1577                 self.category, self.pf = portage.catsplit(self.cpv)
1578                 self.cpv_split = portage.catpkgsplit(self.cpv)
1579                 self.pv_split = self.cpv_split[1:]
1580
1581         class _use(object):
1582
1583                 __slots__ = ("__weakref__", "enabled")
1584
1585                 def __init__(self, use):
1586                         self.enabled = frozenset(use)
1587
1588         class _iuse(object):
1589
1590                 __slots__ = ("__weakref__", "all", "enabled", "disabled", "iuse_implicit", "regex", "tokens")
1591
1592                 def __init__(self, tokens, iuse_implicit):
1593                         self.tokens = tuple(tokens)
1594                         self.iuse_implicit = iuse_implicit
1595                         enabled = []
1596                         disabled = []
1597                         other = []
1598                         for x in tokens:
1599                                 prefix = x[:1]
1600                                 if prefix == "+":
1601                                         enabled.append(x[1:])
1602                                 elif prefix == "-":
1603                                         disabled.append(x[1:])
1604                                 else:
1605                                         other.append(x)
1606                         self.enabled = frozenset(enabled)
1607                         self.disabled = frozenset(disabled)
1608                         self.all = frozenset(chain(enabled, disabled, other))
1609
1610                 def __getattribute__(self, name):
1611                         if name == "regex":
1612                                 try:
1613                                         return object.__getattribute__(self, "regex")
1614                                 except AttributeError:
1615                                         all = object.__getattribute__(self, "all")
1616                                         iuse_implicit = object.__getattribute__(self, "iuse_implicit")
1617                                         # Escape anything except ".*" which is supposed
1618                                         # to pass through from _get_implicit_iuse()
1619                                         regex = (re.escape(x) for x in chain(all, iuse_implicit))
1620                                         regex = "^(%s)$" % "|".join(regex)
1621                                         regex = regex.replace("\\.\\*", ".*")
1622                                         self.regex = re.compile(regex)
1623                         return object.__getattribute__(self, name)
1624
1625         def _get_hash_key(self):
1626                 hash_key = getattr(self, "_hash_key", None)
1627                 if hash_key is None:
1628                         if self.operation is None:
1629                                 self.operation = "merge"
1630                                 if self.onlydeps or self.installed:
1631                                         self.operation = "nomerge"
1632                         self._hash_key = \
1633                                 (self.type_name, self.root, self.cpv, self.operation)
1634                 return self._hash_key
1635
1636         def __lt__(self, other):
1637                 if other.cp != self.cp:
1638                         return False
1639                 if portage.pkgcmp(self.pv_split, other.pv_split) < 0:
1640                         return True
1641                 return False
1642
1643         def __le__(self, other):
1644                 if other.cp != self.cp:
1645                         return False
1646                 if portage.pkgcmp(self.pv_split, other.pv_split) <= 0:
1647                         return True
1648                 return False
1649
1650         def __gt__(self, other):
1651                 if other.cp != self.cp:
1652                         return False
1653                 if portage.pkgcmp(self.pv_split, other.pv_split) > 0:
1654                         return True
1655                 return False
1656
1657         def __ge__(self, other):
1658                 if other.cp != self.cp:
1659                         return False
1660                 if portage.pkgcmp(self.pv_split, other.pv_split) >= 0:
1661                         return True
1662                 return False
1663
1664 _all_metadata_keys = set(x for x in portage.auxdbkeys \
1665         if not x.startswith("UNUSED_"))
1666 _all_metadata_keys.discard("CDEPEND")
1667 _all_metadata_keys.update(Package.metadata_keys)
1668
1669 from portage.cache.mappings import slot_dict_class
1670 _PackageMetadataWrapperBase = slot_dict_class(_all_metadata_keys)
1671
1672 class _PackageMetadataWrapper(_PackageMetadataWrapperBase):
1673         """
1674         Detect metadata updates and synchronize Package attributes.
1675         """
1676
1677         __slots__ = ("_pkg",)
1678         _wrapped_keys = frozenset(
1679                 ["COUNTER", "INHERITED", "IUSE", "SLOT", "USE", "_mtime_"])
1680
1681         def __init__(self, pkg, metadata):
1682                 _PackageMetadataWrapperBase.__init__(self)
1683                 self._pkg = pkg
1684                 self.update(metadata)
1685
1686         def __setitem__(self, k, v):
1687                 _PackageMetadataWrapperBase.__setitem__(self, k, v)
1688                 if k in self._wrapped_keys:
1689                         getattr(self, "_set_" + k.lower())(k, v)
1690
1691         def _set_inherited(self, k, v):
1692                 if isinstance(v, basestring):
1693                         v = frozenset(v.split())
1694                 self._pkg.inherited = v
1695
1696         def _set_iuse(self, k, v):
1697                 self._pkg.iuse = self._pkg._iuse(
1698                         v.split(), self._pkg.root_config.iuse_implicit)
1699
1700         def _set_slot(self, k, v):
1701                 self._pkg.slot = v
1702
1703         def _set_use(self, k, v):
1704                 self._pkg.use = self._pkg._use(v.split())
1705
1706         def _set_counter(self, k, v):
1707                 if isinstance(v, basestring):
1708                         try:
1709                                 v = long(v.strip())
1710                         except ValueError:
1711                                 v = 0
1712                 self._pkg.counter = v
1713
1714         def _set__mtime_(self, k, v):
1715                 if isinstance(v, basestring):
1716                         try:
1717                                 v = long(v.strip())
1718                         except ValueError:
1719                                 v = 0
1720                 self._pkg.mtime = v
1721
1722 class EbuildFetchonly(SlotObject):
1723
1724         __slots__ = ("fetch_all", "pkg", "pretend", "settings")
1725
1726         def execute(self):
1727                 settings = self.settings
1728                 pkg = self.pkg
1729                 portdb = pkg.root_config.trees["porttree"].dbapi
1730                 ebuild_path = portdb.findname(pkg.cpv)
1731                 settings.setcpv(pkg)
1732                 debug = settings.get("PORTAGE_DEBUG") == "1"
1733                 use_cache = 1 # always true
1734                 portage.doebuild_environment(ebuild_path, "fetch",
1735                         settings["ROOT"], settings, debug, use_cache, portdb)
1736                 restrict_fetch = 'fetch' in settings['PORTAGE_RESTRICT'].split()
1737
1738                 if restrict_fetch:
1739                         rval = self._execute_with_builddir()
1740                 else:
1741                         rval = portage.doebuild(ebuild_path, "fetch",
1742                                 settings["ROOT"], settings, debug=debug,
1743                                 listonly=self.pretend, fetchonly=1, fetchall=self.fetch_all,
1744                                 mydbapi=portdb, tree="porttree")
1745
1746                         if rval != os.EX_OK:
1747                                 msg = "Fetch failed for '%s'" % (pkg.cpv,)
1748                                 eerror(msg, phase="unpack", key=pkg.cpv)
1749
1750                 return rval
1751
1752         def _execute_with_builddir(self):
1753                 # To spawn pkg_nofetch requires PORTAGE_BUILDDIR for
1754                 # ensuring sane $PWD (bug #239560) and storing elog
1755                 # messages. Use a private temp directory, in order
1756                 # to avoid locking the main one.
1757                 settings = self.settings
1758                 global_tmpdir = settings["PORTAGE_TMPDIR"]
1759                 from tempfile import mkdtemp
1760                 try:
1761                         private_tmpdir = mkdtemp("", "._portage_fetch_.", global_tmpdir)
1762                 except OSError, e:
1763                         if e.errno != portage.exception.PermissionDenied.errno:
1764                                 raise
1765                         raise portage.exception.PermissionDenied(global_tmpdir)
1766                 settings["PORTAGE_TMPDIR"] = private_tmpdir
1767                 settings.backup_changes("PORTAGE_TMPDIR")
1768                 try:
1769                         retval = self._execute()
1770                 finally:
1771                         settings["PORTAGE_TMPDIR"] = global_tmpdir
1772                         settings.backup_changes("PORTAGE_TMPDIR")
1773                         shutil.rmtree(private_tmpdir)
1774                 return retval
1775
1776         def _execute(self):
1777                 settings = self.settings
1778                 pkg = self.pkg
1779                 root_config = pkg.root_config
1780                 portdb = root_config.trees["porttree"].dbapi
1781                 ebuild_path = portdb.findname(pkg.cpv)
1782                 debug = settings.get("PORTAGE_DEBUG") == "1"
1783                 portage.prepare_build_dirs(self.pkg.root, self.settings, 0)
1784
1785                 retval = portage.doebuild(ebuild_path, "fetch",
1786                         self.settings["ROOT"], self.settings, debug=debug,
1787                         listonly=self.pretend, fetchonly=1, fetchall=self.fetch_all,
1788                         mydbapi=portdb, tree="porttree")
1789
1790                 if retval != os.EX_OK:
1791                         msg = "Fetch failed for '%s'" % (pkg.cpv,)
1792                         eerror(msg, phase="unpack", key=pkg.cpv)
1793
1794                 portage.elog.elog_process(self.pkg.cpv, self.settings)
1795                 return retval
1796
1797 class PollConstants(object):
1798
1799         """
1800         Provides POLL* constants that are equivalent to those from the
1801         select module, for use by PollSelectAdapter.
1802         """
1803
1804         names = ("POLLIN", "POLLPRI", "POLLOUT", "POLLERR", "POLLHUP", "POLLNVAL")
1805         v = 1
1806         for k in names:
1807                 locals()[k] = getattr(select, k, v)
1808                 v *= 2
1809         del k, v
1810
1811 class AsynchronousTask(SlotObject):
1812         """
1813         Subclasses override _wait() and _poll() so that calls
1814         to public methods can be wrapped for implementing
1815         hooks such as exit listener notification.
1816
1817         Sublasses should call self.wait() to notify exit listeners after
1818         the task is complete and self.returncode has been set.
1819         """
1820
1821         __slots__ = ("background", "cancelled", "returncode") + \
1822                 ("_exit_listeners", "_exit_listener_stack", "_start_listeners")
1823
1824         def start(self):
1825                 """
1826                 Start an asynchronous task and then return as soon as possible.
1827                 """
1828                 self._start_hook()
1829                 self._start()
1830
1831         def _start(self):
1832                 raise NotImplementedError(self)
1833
1834         def isAlive(self):
1835                 return self.returncode is None
1836
1837         def poll(self):
1838                 self._wait_hook()
1839                 return self._poll()
1840
1841         def _poll(self):
1842                 return self.returncode
1843
1844         def wait(self):
1845                 if self.returncode is None:
1846                         self._wait()
1847                 self._wait_hook()
1848                 return self.returncode
1849
1850         def _wait(self):
1851                 return self.returncode
1852
1853         def cancel(self):
1854                 self.cancelled = True
1855                 self.wait()
1856
1857         def addStartListener(self, f):
1858                 """
1859                 The function will be called with one argument, a reference to self.
1860                 """
1861                 if self._start_listeners is None:
1862                         self._start_listeners = []
1863                 self._start_listeners.append(f)
1864
1865         def removeStartListener(self, f):
1866                 if self._start_listeners is None:
1867                         return
1868                 self._start_listeners.remove(f)
1869
1870         def _start_hook(self):
1871                 if self._start_listeners is not None:
1872                         start_listeners = self._start_listeners
1873                         self._start_listeners = None
1874
1875                         for f in start_listeners:
1876                                 f(self)
1877
1878         def addExitListener(self, f):
1879                 """
1880                 The function will be called with one argument, a reference to self.
1881                 """
1882                 if self._exit_listeners is None:
1883                         self._exit_listeners = []
1884                 self._exit_listeners.append(f)
1885
1886         def removeExitListener(self, f):
1887                 if self._exit_listeners is None:
1888                         if self._exit_listener_stack is not None:
1889                                 self._exit_listener_stack.remove(f)
1890                         return
1891                 self._exit_listeners.remove(f)
1892
1893         def _wait_hook(self):
1894                 """
1895                 Call this method after the task completes, just before returning
1896                 the returncode from wait() or poll(). This hook is
1897                 used to trigger exit listeners when the returncode first
1898                 becomes available.
1899                 """
1900                 if self.returncode is not None and \
1901                         self._exit_listeners is not None:
1902
1903                         # This prevents recursion, in case one of the
1904                         # exit handlers triggers this method again by
1905                         # calling wait(). Use a stack that gives
1906                         # removeExitListener() an opportunity to consume
1907                         # listeners from the stack, before they can get
1908                         # called below. This is necessary because a call
1909                         # to one exit listener may result in a call to
1910                         # removeExitListener() for another listener on
1911                         # the stack. That listener needs to be removed
1912                         # from the stack since it would be inconsistent
1913                         # to call it after it has been been passed into
1914                         # removeExitListener().
1915                         self._exit_listener_stack = self._exit_listeners
1916                         self._exit_listeners = None
1917
1918                         self._exit_listener_stack.reverse()
1919                         while self._exit_listener_stack:
1920                                 self._exit_listener_stack.pop()(self)
1921
1922 class AbstractPollTask(AsynchronousTask):
1923
1924         __slots__ = ("scheduler",) + \
1925                 ("_registered",)
1926
1927         _bufsize = 4096
1928         _exceptional_events = PollConstants.POLLERR | PollConstants.POLLNVAL
1929         _registered_events = PollConstants.POLLIN | PollConstants.POLLHUP | \
1930                 _exceptional_events
1931
1932         def _unregister(self):
1933                 raise NotImplementedError(self)
1934
1935         def _unregister_if_appropriate(self, event):
1936                 if self._registered:
1937                         if event & self._exceptional_events:
1938                                 self._unregister()
1939                                 self.cancel()
1940                         elif event & PollConstants.POLLHUP:
1941                                 self._unregister()
1942                                 self.wait()
1943
1944 class PipeReader(AbstractPollTask):
1945
1946         """
1947         Reads output from one or more files and saves it in memory,
1948         for retrieval via the getvalue() method. This is driven by
1949         the scheduler's poll() loop, so it runs entirely within the
1950         current process.
1951         """
1952
1953         __slots__ = ("input_files",) + \
1954                 ("_read_data", "_reg_ids")
1955
1956         def _start(self):
1957                 self._reg_ids = set()
1958                 self._read_data = []
1959                 for k, f in self.input_files.iteritems():
1960                         fcntl.fcntl(f.fileno(), fcntl.F_SETFL,
1961                                 fcntl.fcntl(f.fileno(), fcntl.F_GETFL) | os.O_NONBLOCK)
1962                         self._reg_ids.add(self.scheduler.register(f.fileno(),
1963                                 self._registered_events, self._output_handler))
1964                 self._registered = True
1965
1966         def isAlive(self):
1967                 return self._registered
1968
1969         def cancel(self):
1970                 if self.returncode is None:
1971                         self.returncode = 1
1972                         self.cancelled = True
1973                 self.wait()
1974
1975         def _wait(self):
1976                 if self.returncode is not None:
1977                         return self.returncode
1978
1979                 if self._registered:
1980                         self.scheduler.schedule(self._reg_ids)
1981                         self._unregister()
1982
1983                 self.returncode = os.EX_OK
1984                 return self.returncode
1985
1986         def getvalue(self):
1987                 """Retrieve the entire contents"""
1988                 if sys.hexversion >= 0x3000000:
1989                         return bytes().join(self._read_data)
1990                 return "".join(self._read_data)
1991
1992         def close(self):
1993                 """Free the memory buffer."""
1994                 self._read_data = None
1995
1996         def _output_handler(self, fd, event):
1997
1998                 if event & PollConstants.POLLIN:
1999
2000                         for f in self.input_files.itervalues():
2001                                 if fd == f.fileno():
2002                                         break
2003
2004                         buf = array.array('B')
2005                         try:
2006                                 buf.fromfile(f, self._bufsize)
2007                         except EOFError:
2008                                 pass
2009
2010                         if buf:
2011                                 self._read_data.append(buf.tostring())
2012                         else:
2013                                 self._unregister()
2014                                 self.wait()
2015
2016                 self._unregister_if_appropriate(event)
2017                 return self._registered
2018
2019         def _unregister(self):
2020                 """
2021                 Unregister from the scheduler and close open files.
2022                 """
2023
2024                 self._registered = False
2025
2026                 if self._reg_ids is not None:
2027                         for reg_id in self._reg_ids:
2028                                 self.scheduler.unregister(reg_id)
2029                         self._reg_ids = None
2030
2031                 if self.input_files is not None:
2032                         for f in self.input_files.itervalues():
2033                                 f.close()
2034                         self.input_files = None
2035
2036 class CompositeTask(AsynchronousTask):
2037
2038         __slots__ = ("scheduler",) + ("_current_task",)
2039
2040         def isAlive(self):
2041                 return self._current_task is not None
2042
2043         def cancel(self):
2044                 self.cancelled = True
2045                 if self._current_task is not None:
2046                         self._current_task.cancel()
2047
2048         def _poll(self):
2049                 """
2050                 This does a loop calling self._current_task.poll()
2051                 repeatedly as long as the value of self._current_task
2052                 keeps changing. It calls poll() a maximum of one time
2053                 for a given self._current_task instance. This is useful
2054                 since calling poll() on a task can trigger advance to
2055                 the next task could eventually lead to the returncode
2056                 being set in cases when polling only a single task would
2057                 not have the same effect.
2058                 """
2059
2060                 prev = None
2061                 while True:
2062                         task = self._current_task
2063                         if task is None or task is prev:
2064                                 # don't poll the same task more than once
2065                                 break
2066                         task.poll()
2067                         prev = task
2068
2069                 return self.returncode
2070
2071         def _wait(self):
2072
2073                 prev = None
2074                 while True:
2075                         task = self._current_task
2076                         if task is None:
2077                                 # don't wait for the same task more than once
2078                                 break
2079                         if task is prev:
2080                                 # Before the task.wait() method returned, an exit
2081                                 # listener should have set self._current_task to either
2082                                 # a different task or None. Something is wrong.
2083                                 raise AssertionError("self._current_task has not " + \
2084                                         "changed since calling wait", self, task)
2085                         task.wait()
2086                         prev = task
2087
2088                 return self.returncode
2089
2090         def _assert_current(self, task):
2091                 """
2092                 Raises an AssertionError if the given task is not the
2093                 same one as self._current_task. This can be useful
2094                 for detecting bugs.
2095                 """
2096                 if task is not self._current_task:
2097                         raise AssertionError("Unrecognized task: %s" % (task,))
2098
2099         def _default_exit(self, task):
2100                 """
2101                 Calls _assert_current() on the given task and then sets the
2102                 composite returncode attribute if task.returncode != os.EX_OK.
2103                 If the task failed then self._current_task will be set to None.
2104                 Subclasses can use this as a generic task exit callback.
2105
2106                 @rtype: int
2107                 @returns: The task.returncode attribute.
2108                 """
2109                 self._assert_current(task)
2110                 if task.returncode != os.EX_OK:
2111                         self.returncode = task.returncode
2112                         self._current_task = None
2113                 return task.returncode
2114
2115         def _final_exit(self, task):
2116                 """
2117                 Assumes that task is the final task of this composite task.
2118                 Calls _default_exit() and sets self.returncode to the task's
2119                 returncode and sets self._current_task to None.
2120                 """
2121                 self._default_exit(task)
2122                 self._current_task = None
2123                 self.returncode = task.returncode
2124                 return self.returncode
2125
2126         def _default_final_exit(self, task):
2127                 """
2128                 This calls _final_exit() and then wait().
2129
2130                 Subclasses can use this as a generic final task exit callback.
2131
2132                 """
2133                 self._final_exit(task)
2134                 return self.wait()
2135
2136         def _start_task(self, task, exit_handler):
2137                 """
2138                 Register exit handler for the given task, set it
2139                 as self._current_task, and call task.start().
2140
2141                 Subclasses can use this as a generic way to start
2142                 a task.
2143
2144                 """
2145                 task.addExitListener(exit_handler)
2146                 self._current_task = task
2147                 task.start()
2148
2149 class TaskSequence(CompositeTask):
2150         """
2151         A collection of tasks that executes sequentially. Each task
2152         must have a addExitListener() method that can be used as
2153         a means to trigger movement from one task to the next.
2154         """
2155
2156         __slots__ = ("_task_queue",)
2157
2158         def __init__(self, **kwargs):
2159                 AsynchronousTask.__init__(self, **kwargs)
2160                 self._task_queue = deque()
2161
2162         def add(self, task):
2163                 self._task_queue.append(task)
2164
2165         def _start(self):
2166                 self._start_next_task()
2167
2168         def cancel(self):
2169                 self._task_queue.clear()
2170                 CompositeTask.cancel(self)
2171
2172         def _start_next_task(self):
2173                 self._start_task(self._task_queue.popleft(),
2174                         self._task_exit_handler)
2175
2176         def _task_exit_handler(self, task):
2177                 if self._default_exit(task) != os.EX_OK:
2178                         self.wait()
2179                 elif self._task_queue:
2180                         self._start_next_task()
2181                 else:
2182                         self._final_exit(task)
2183                         self.wait()
2184
2185 class SubProcess(AbstractPollTask):
2186
2187         __slots__ = ("pid",) + \
2188                 ("_files", "_reg_id")
2189
2190         # A file descriptor is required for the scheduler to monitor changes from
2191         # inside a poll() loop. When logging is not enabled, create a pipe just to
2192         # serve this purpose alone.
2193         _dummy_pipe_fd = 9
2194
2195         def _poll(self):
2196                 if self.returncode is not None:
2197                         return self.returncode
2198                 if self.pid is None:
2199                         return self.returncode
2200                 if self._registered:
2201                         return self.returncode
2202
2203                 try:
2204                         retval = os.waitpid(self.pid, os.WNOHANG)
2205                 except OSError, e:
2206                         if e.errno != errno.ECHILD:
2207                                 raise
2208                         del e
2209                         retval = (self.pid, 1)
2210
2211                 if retval == (0, 0):
2212                         return None
2213                 self._set_returncode(retval)
2214                 return self.returncode
2215
2216         def cancel(self):
2217                 if self.isAlive():
2218                         try:
2219                                 os.kill(self.pid, signal.SIGTERM)
2220                         except OSError, e:
2221                                 if e.errno != errno.ESRCH:
2222                                         raise
2223                                 del e
2224
2225                 self.cancelled = True
2226                 if self.pid is not None:
2227                         self.wait()
2228                 return self.returncode
2229
2230         def isAlive(self):
2231                 return self.pid is not None and \
2232                         self.returncode is None
2233
2234         def _wait(self):
2235
2236                 if self.returncode is not None:
2237                         return self.returncode
2238
2239                 if self._registered:
2240                         self.scheduler.schedule(self._reg_id)
2241                         self._unregister()
2242                         if self.returncode is not None:
2243                                 return self.returncode
2244
2245                 try:
2246                         wait_retval = os.waitpid(self.pid, 0)
2247                 except OSError, e:
2248                         if e.errno != errno.ECHILD:
2249                                 raise
2250                         del e
2251                         self._set_returncode((self.pid, 1))
2252                 else:
2253                         self._set_returncode(wait_retval)
2254
2255                 return self.returncode
2256
2257         def _unregister(self):
2258                 """
2259                 Unregister from the scheduler and close open files.
2260                 """
2261
2262                 self._registered = False
2263
2264                 if self._reg_id is not None:
2265                         self.scheduler.unregister(self._reg_id)
2266                         self._reg_id = None
2267
2268                 if self._files is not None:
2269                         for f in self._files.itervalues():
2270                                 f.close()
2271                         self._files = None
2272
2273         def _set_returncode(self, wait_retval):
2274
2275                 retval = wait_retval[1]
2276
2277                 if retval != os.EX_OK:
2278                         if retval & 0xff:
2279                                 retval = (retval & 0xff) << 8
2280                         else:
2281                                 retval = retval >> 8
2282
2283                 self.returncode = retval
2284
2285 class SpawnProcess(SubProcess):
2286
2287         """
2288         Constructor keyword args are passed into portage.process.spawn().
2289         The required "args" keyword argument will be passed as the first
2290         spawn() argument.
2291         """
2292
2293         _spawn_kwarg_names = ("env", "opt_name", "fd_pipes",
2294                 "uid", "gid", "groups", "umask", "logfile",
2295                 "path_lookup", "pre_exec")
2296
2297         __slots__ = ("args",) + \
2298                 _spawn_kwarg_names
2299
2300         _file_names = ("log", "process", "stdout")
2301         _files_dict = slot_dict_class(_file_names, prefix="")
2302
2303         def _start(self):
2304
2305                 if self.cancelled:
2306                         return
2307
2308                 if self.fd_pipes is None:
2309                         self.fd_pipes = {}
2310                 fd_pipes = self.fd_pipes
2311                 fd_pipes.setdefault(0, sys.stdin.fileno())
2312                 fd_pipes.setdefault(1, sys.stdout.fileno())
2313                 fd_pipes.setdefault(2, sys.stderr.fileno())
2314
2315                 # flush any pending output
2316                 for fd in fd_pipes.itervalues():
2317                         if fd == sys.stdout.fileno():
2318                                 sys.stdout.flush()
2319                         if fd == sys.stderr.fileno():
2320                                 sys.stderr.flush()
2321
2322                 logfile = self.logfile
2323                 self._files = self._files_dict()
2324                 files = self._files
2325
2326                 master_fd, slave_fd = self._pipe(fd_pipes)
2327                 fcntl.fcntl(master_fd, fcntl.F_SETFL,
2328                         fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
2329
2330                 null_input = None
2331                 fd_pipes_orig = fd_pipes.copy()
2332                 if self.background:
2333                         # TODO: Use job control functions like tcsetpgrp() to control
2334                         # access to stdin. Until then, use /dev/null so that any
2335                         # attempts to read from stdin will immediately return EOF
2336                         # instead of blocking indefinitely.
2337                         null_input = open('/dev/null', 'rb')
2338                         fd_pipes[0] = null_input.fileno()
2339                 else:
2340                         fd_pipes[0] = fd_pipes_orig[0]
2341
2342                 files.process = os.fdopen(master_fd, 'rb')
2343                 if logfile is not None:
2344
2345                         fd_pipes[1] = slave_fd
2346                         fd_pipes[2] = slave_fd
2347
2348                         files.log = open(logfile, mode='ab')
2349                         portage.util.apply_secpass_permissions(logfile,
2350                                 uid=portage.portage_uid, gid=portage.portage_gid,
2351                                 mode=0660)
2352
2353                         if not self.background:
2354                                 files.stdout = os.fdopen(os.dup(fd_pipes_orig[1]), 'wb')
2355
2356                         output_handler = self._output_handler
2357
2358                 else:
2359
2360                         # Create a dummy pipe so the scheduler can monitor
2361                         # the process from inside a poll() loop.
2362                         fd_pipes[self._dummy_pipe_fd] = slave_fd
2363                         if self.background:
2364                                 fd_pipes[1] = slave_fd
2365                                 fd_pipes[2] = slave_fd
2366                         output_handler = self._dummy_handler
2367
2368                 kwargs = {}
2369                 for k in self._spawn_kwarg_names:
2370                         v = getattr(self, k)
2371                         if v is not None:
2372                                 kwargs[k] = v
2373
2374                 kwargs["fd_pipes"] = fd_pipes
2375                 kwargs["returnpid"] = True
2376                 kwargs.pop("logfile", None)
2377
2378                 self._reg_id = self.scheduler.register(files.process.fileno(),
2379                         self._registered_events, output_handler)
2380                 self._registered = True
2381
2382                 retval = self._spawn(self.args, **kwargs)
2383
2384                 os.close(slave_fd)
2385                 if null_input is not None:
2386                         null_input.close()
2387
2388                 if isinstance(retval, int):
2389                         # spawn failed
2390                         self._unregister()
2391                         self.returncode = retval
2392                         self.wait()
2393                         return
2394
2395                 self.pid = retval[0]
2396                 portage.process.spawned_pids.remove(self.pid)
2397
2398         def _pipe(self, fd_pipes):
2399                 """
2400                 @type fd_pipes: dict
2401                 @param fd_pipes: pipes from which to copy terminal size if desired.
2402                 """
2403                 return os.pipe()
2404
2405         def _spawn(self, args, **kwargs):
2406                 return portage.process.spawn(args, **kwargs)
2407
2408         def _output_handler(self, fd, event):
2409
2410                 if event & PollConstants.POLLIN:
2411
2412                         files = self._files
2413                         buf = array.array('B')
2414                         try:
2415                                 buf.fromfile(files.process, self._bufsize)
2416                         except EOFError:
2417                                 pass
2418
2419                         if buf:
2420                                 if not self.background:
2421                                         buf.tofile(files.stdout)
2422                                         files.stdout.flush()
2423                                 buf.tofile(files.log)
2424                                 files.log.flush()
2425                         else:
2426                                 self._unregister()
2427                                 self.wait()
2428
2429                 self._unregister_if_appropriate(event)
2430                 return self._registered
2431
2432         def _dummy_handler(self, fd, event):
2433                 """
2434                 This method is mainly interested in detecting EOF, since
2435                 the only purpose of the pipe is to allow the scheduler to
2436                 monitor the process from inside a poll() loop.
2437                 """
2438
2439                 if event & PollConstants.POLLIN:
2440
2441                         buf = array.array('B')
2442                         try:
2443                                 buf.fromfile(self._files.process, self._bufsize)
2444                         except EOFError:
2445                                 pass
2446
2447                         if buf:
2448                                 pass
2449                         else:
2450                                 self._unregister()
2451                                 self.wait()
2452
2453                 self._unregister_if_appropriate(event)
2454                 return self._registered
2455
2456 class MiscFunctionsProcess(SpawnProcess):
2457         """
2458         Spawns misc-functions.sh with an existing ebuild environment.
2459         """
2460
2461         __slots__ = ("commands", "phase", "pkg", "settings")
2462
2463         def _start(self):
2464                 settings = self.settings
2465                 settings.pop("EBUILD_PHASE", None)
2466                 portage_bin_path = settings["PORTAGE_BIN_PATH"]
2467                 misc_sh_binary = os.path.join(portage_bin_path,
2468                         os.path.basename(portage.const.MISC_SH_BINARY))
2469
2470                 self.args = [portage._shell_quote(misc_sh_binary)] + self.commands
2471                 self.logfile = settings.get("PORTAGE_LOG_FILE")
2472
2473                 portage._doebuild_exit_status_unlink(
2474                         settings.get("EBUILD_EXIT_STATUS_FILE"))
2475
2476                 SpawnProcess._start(self)
2477
2478         def _spawn(self, args, **kwargs):
2479                 settings = self.settings
2480                 debug = settings.get("PORTAGE_DEBUG") == "1"
2481                 return portage.spawn(" ".join(args), settings,
2482                         debug=debug, **kwargs)
2483
2484         def _set_returncode(self, wait_retval):
2485                 SpawnProcess._set_returncode(self, wait_retval)
2486                 self.returncode = portage._doebuild_exit_status_check_and_log(
2487                         self.settings, self.phase, self.returncode)
2488
2489 class EbuildFetcher(SpawnProcess):
2490
2491         __slots__ = ("config_pool", "fetchonly", "fetchall", "pkg", "prefetch") + \
2492                 ("_build_dir",)
2493
2494         def _start(self):
2495
2496                 root_config = self.pkg.root_config
2497                 portdb = root_config.trees["porttree"].dbapi
2498                 ebuild_path = portdb.findname(self.pkg.cpv)
2499                 settings = self.config_pool.allocate()
2500                 settings.setcpv(self.pkg)
2501
2502                 # In prefetch mode, logging goes to emerge-fetch.log and the builddir
2503                 # should not be touched since otherwise it could interfere with
2504                 # another instance of the same cpv concurrently being built for a
2505                 # different $ROOT (currently, builds only cooperate with prefetchers
2506                 # that are spawned for the same $ROOT).
2507                 if not self.prefetch:
2508                         self._build_dir = EbuildBuildDir(pkg=self.pkg, settings=settings)
2509                         self._build_dir.lock()
2510                         self._build_dir.clean()
2511                         portage.prepare_build_dirs(self.pkg.root, self._build_dir.settings, 0)
2512                         if self.logfile is None:
2513                                 self.logfile = settings.get("PORTAGE_LOG_FILE")
2514
2515                 phase = "fetch"
2516                 if self.fetchall:
2517                         phase = "fetchall"
2518
2519                 # If any incremental variables have been overridden
2520                 # via the environment, those values need to be passed
2521                 # along here so that they are correctly considered by
2522                 # the config instance in the subproccess.
2523                 fetch_env = os.environ.copy()
2524
2525                 nocolor = settings.get("NOCOLOR")
2526                 if nocolor is not None:
2527                         fetch_env["NOCOLOR"] = nocolor
2528
2529                 fetch_env["PORTAGE_NICENESS"] = "0"
2530                 if self.prefetch:
2531                         fetch_env["PORTAGE_PARALLEL_FETCHONLY"] = "1"
2532
2533                 ebuild_binary = os.path.join(
2534                         settings["PORTAGE_BIN_PATH"], "ebuild")
2535
2536                 fetch_args = [ebuild_binary, ebuild_path, phase]
2537                 debug = settings.get("PORTAGE_DEBUG") == "1"
2538                 if debug:
2539                         fetch_args.append("--debug")
2540
2541                 self.args = fetch_args
2542                 self.env = fetch_env
2543                 SpawnProcess._start(self)
2544
2545         def _pipe(self, fd_pipes):
2546                 """When appropriate, use a pty so that fetcher progress bars,
2547                 like wget has, will work properly."""
2548                 if self.background or not sys.stdout.isatty():
2549                         # When the output only goes to a log file,
2550                         # there's no point in creating a pty.
2551                         return os.pipe()
2552                 stdout_pipe = fd_pipes.get(1)
2553                 got_pty, master_fd, slave_fd = \
2554                         portage._create_pty_or_pipe(copy_term_size=stdout_pipe)
2555                 return (master_fd, slave_fd)
2556
2557         def _set_returncode(self, wait_retval):
2558                 SpawnProcess._set_returncode(self, wait_retval)
2559                 # Collect elog messages that might have been
2560                 # created by the pkg_nofetch phase.
2561                 if self._build_dir is not None:
2562                         # Skip elog messages for prefetch, in order to avoid duplicates.
2563                         if not self.prefetch and self.returncode != os.EX_OK:
2564                                 elog_out = None
2565                                 if self.logfile is not None:
2566                                         if self.background:
2567                                                 elog_out = open(self.logfile, 'a')
2568                                 msg = "Fetch failed for '%s'" % (self.pkg.cpv,)
2569                                 if self.logfile is not None:
2570                                         msg += ", Log file:"
2571                                 eerror(msg, phase="unpack", key=self.pkg.cpv, out=elog_out)
2572                                 if self.logfile is not None:
2573                                         eerror(" '%s'" % (self.logfile,),
2574                                                 phase="unpack", key=self.pkg.cpv, out=elog_out)
2575                                 if elog_out is not None:
2576                                         elog_out.close()
2577                         if not self.prefetch:
2578                                 portage.elog.elog_process(self.pkg.cpv, self._build_dir.settings)
2579                         features = self._build_dir.settings.features
2580                         if self.returncode == os.EX_OK:
2581                                 self._build_dir.clean()
2582                         self._build_dir.unlock()
2583                         self.config_pool.deallocate(self._build_dir.settings)
2584                         self._build_dir = None
2585
2586 class EbuildBuildDir(SlotObject):
2587
2588         __slots__ = ("dir_path", "pkg", "settings",
2589                 "locked", "_catdir", "_lock_obj")
2590
2591         def __init__(self, **kwargs):
2592                 SlotObject.__init__(self, **kwargs)
2593                 self.locked = False
2594
2595         def lock(self):
2596                 """
2597                 This raises an AlreadyLocked exception if lock() is called
2598                 while a lock is already held. In order to avoid this, call
2599                 unlock() or check whether the "locked" attribute is True
2600                 or False before calling lock().
2601                 """
2602                 if self._lock_obj is not None:
2603                         raise self.AlreadyLocked((self._lock_obj,))
2604
2605                 dir_path = self.dir_path
2606                 if dir_path is None:
2607                         root_config = self.pkg.root_config
2608                         portdb = root_config.trees["porttree"].dbapi
2609                         ebuild_path = portdb.findname(self.pkg.cpv)
2610                         settings = self.settings
2611                         settings.setcpv(self.pkg)
2612                         debug = settings.get("PORTAGE_DEBUG") == "1"
2613                         use_cache = 1 # always true
2614                         portage.doebuild_environment(ebuild_path, "setup", root_config.root,
2615                                 self.settings, debug, use_cache, portdb)
2616                         dir_path = self.settings["PORTAGE_BUILDDIR"]
2617
2618                 catdir = os.path.dirname(dir_path)
2619                 self._catdir = catdir
2620
2621                 portage.util.ensure_dirs(os.path.dirname(catdir),
2622                         gid=portage.portage_gid,
2623                         mode=070, mask=0)
2624                 catdir_lock = None
2625                 try:
2626                         catdir_lock = portage.locks.lockdir(catdir)
2627                         portage.util.ensure_dirs(catdir,
2628                                 gid=portage.portage_gid,
2629                                 mode=070, mask=0)
2630                         self._lock_obj = portage.locks.lockdir(dir_path)
2631                 finally:
2632                         self.locked = self._lock_obj is not None
2633                         if catdir_lock is not None:
2634                                 portage.locks.unlockdir(catdir_lock)
2635
2636         def clean(self):
2637                 """Uses shutil.rmtree() rather than spawning a 'clean' phase. Disabled
2638                 by keepwork or keeptemp in FEATURES."""
2639                 settings = self.settings
2640                 features = settings.features
2641                 if not ("keepwork" in features or "keeptemp" in features):
2642                         try:
2643                                 shutil.rmtree(settings["PORTAGE_BUILDDIR"])
2644                         except EnvironmentError, e:
2645                                 if e.errno != errno.ENOENT:
2646                                         raise
2647                                 del e
2648
2649         def unlock(self):
2650                 if self._lock_obj is None:
2651                         return
2652
2653                 portage.locks.unlockdir(self._lock_obj)
2654                 self._lock_obj = None
2655                 self.locked = False
2656
2657                 catdir = self._catdir
2658                 catdir_lock = None
2659                 try:
2660                         catdir_lock = portage.locks.lockdir(catdir)
2661                 finally:
2662                         if catdir_lock:
2663                                 try:
2664                                         os.rmdir(catdir)
2665                                 except OSError, e:
2666                                         if e.errno not in (errno.ENOENT,
2667                                                 errno.ENOTEMPTY, errno.EEXIST):
2668                                                 raise
2669                                         del e
2670                                 portage.locks.unlockdir(catdir_lock)
2671
2672         class AlreadyLocked(portage.exception.PortageException):
2673                 pass
2674
2675 class EbuildBuild(CompositeTask):
2676
2677         __slots__ = ("args_set", "config_pool", "find_blockers",
2678                 "ldpath_mtimes", "logger", "opts", "pkg", "pkg_count",
2679                 "prefetcher", "settings", "world_atom") + \
2680                 ("_build_dir", "_buildpkg", "_ebuild_path", "_issyspkg", "_tree")
2681
2682         def _start(self):
2683
2684                 logger = self.logger
2685                 opts = self.opts
2686                 pkg = self.pkg
2687                 settings = self.settings
2688                 world_atom = self.world_atom
2689                 root_config = pkg.root_config
2690                 tree = "porttree"
2691                 self._tree = tree
2692                 portdb = root_config.trees[tree].dbapi
2693                 settings.setcpv(pkg)
2694                 settings.configdict["pkg"]["EMERGE_FROM"] = pkg.type_name
2695                 ebuild_path = portdb.findname(self.pkg.cpv)
2696                 self._ebuild_path = ebuild_path
2697
2698                 prefetcher = self.prefetcher
2699                 if prefetcher is None:
2700                         pass
2701                 elif not prefetcher.isAlive():
2702                         prefetcher.cancel()
2703                 elif prefetcher.poll() is None:
2704
2705                         waiting_msg = "Fetching files " + \
2706                                 "in the background. " + \
2707                                 "To view fetch progress, run `tail -f " + \
2708                                 "/var/log/emerge-fetch.log` in another " + \
2709                                 "terminal."
2710                         msg_prefix = colorize("GOOD", " * ")
2711                         from textwrap import wrap
2712                         waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
2713                                 for line in wrap(waiting_msg, 65))
2714                         if not self.background:
2715                                 writemsg(waiting_msg, noiselevel=-1)
2716
2717                         self._current_task = prefetcher
2718                         prefetcher.addExitListener(self._prefetch_exit)
2719                         return
2720
2721                 self._prefetch_exit(prefetcher)
2722
2723         def _prefetch_exit(self, prefetcher):
2724
2725                 opts = self.opts
2726                 pkg = self.pkg
2727                 settings = self.settings
2728
2729                 if opts.fetchonly:
2730                                 fetcher = EbuildFetchonly(
2731                                         fetch_all=opts.fetch_all_uri,
2732                                         pkg=pkg, pretend=opts.pretend,
2733                                         settings=settings)
2734                                 retval = fetcher.execute()
2735                                 self.returncode = retval
2736                                 self.wait()
2737                                 return
2738
2739                 fetcher = EbuildFetcher(config_pool=self.config_pool,
2740                         fetchall=opts.fetch_all_uri,
2741                         fetchonly=opts.fetchonly,
2742                         background=self.background,
2743                         pkg=pkg, scheduler=self.scheduler)
2744
2745                 self._start_task(fetcher, self._fetch_exit)
2746
2747         def _fetch_exit(self, fetcher):
2748                 opts = self.opts
2749                 pkg = self.pkg
2750
2751                 fetch_failed = False
2752                 if opts.fetchonly:
2753                         fetch_failed = self._final_exit(fetcher) != os.EX_OK
2754                 else:
2755                         fetch_failed = self._default_exit(fetcher) != os.EX_OK
2756
2757                 if fetch_failed and fetcher.logfile is not None and \
2758                         os.path.exists(fetcher.logfile):
2759                         self.settings["PORTAGE_LOG_FILE"] = fetcher.logfile
2760
2761                 if not fetch_failed and fetcher.logfile is not None:
2762                         # Fetch was successful, so remove the fetch log.
2763                         try:
2764                                 os.unlink(fetcher.logfile)
2765                         except OSError:
2766                                 pass
2767
2768                 if fetch_failed or opts.fetchonly:
2769                         self.wait()
2770                         return
2771
2772                 logger = self.logger
2773                 opts = self.opts
2774                 pkg_count = self.pkg_count
2775                 scheduler = self.scheduler
2776                 settings = self.settings
2777                 features = settings.features
2778                 ebuild_path = self._ebuild_path
2779                 system_set = pkg.root_config.sets["system"]
2780
2781                 self._build_dir = EbuildBuildDir(pkg=pkg, settings=settings)
2782                 self._build_dir.lock()
2783
2784                 # Cleaning is triggered before the setup
2785                 # phase, in portage.doebuild().
2786                 msg = " === (%s of %s) Cleaning (%s::%s)" % \
2787                         (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2788                 short_msg = "emerge: (%s of %s) %s Clean" % \
2789                         (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2790                 logger.log(msg, short_msg=short_msg)
2791
2792                 #buildsyspkg: Check if we need to _force_ binary package creation
2793                 self._issyspkg = "buildsyspkg" in features and \
2794                                 system_set.findAtomForPackage(pkg) and \
2795                                 not opts.buildpkg
2796
2797                 if opts.buildpkg or self._issyspkg:
2798
2799                         self._buildpkg = True
2800
2801                         msg = " === (%s of %s) Compiling/Packaging (%s::%s)" % \
2802                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2803                         short_msg = "emerge: (%s of %s) %s Compile" % \
2804                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2805                         logger.log(msg, short_msg=short_msg)
2806
2807                 else:
2808                         msg = " === (%s of %s) Compiling/Merging (%s::%s)" % \
2809                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2810                         short_msg = "emerge: (%s of %s) %s Compile" % \
2811                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2812                         logger.log(msg, short_msg=short_msg)
2813
2814                 build = EbuildExecuter(background=self.background, pkg=pkg,
2815                         scheduler=scheduler, settings=settings)
2816                 self._start_task(build, self._build_exit)
2817
2818         def _unlock_builddir(self):
2819                 portage.elog.elog_process(self.pkg.cpv, self.settings)
2820                 self._build_dir.unlock()
2821
2822         def _build_exit(self, build):
2823                 if self._default_exit(build) != os.EX_OK:
2824                         self._unlock_builddir()
2825                         self.wait()
2826                         return
2827
2828                 opts = self.opts
2829                 buildpkg = self._buildpkg
2830
2831                 if not buildpkg:
2832                         self._final_exit(build)
2833                         self.wait()
2834                         return
2835
2836                 if self._issyspkg:
2837                         msg = ">>> This is a system package, " + \
2838                                 "let's pack a rescue tarball.\n"
2839
2840                         log_path = self.settings.get("PORTAGE_LOG_FILE")
2841                         if log_path is not None:
2842                                 log_file = open(log_path, 'a')
2843                                 try:
2844                                         log_file.write(msg)
2845                                 finally:
2846                                         log_file.close()
2847
2848                         if not self.background:
2849                                 portage.writemsg_stdout(msg, noiselevel=-1)
2850
2851                 packager = EbuildBinpkg(background=self.background, pkg=self.pkg,
2852                         scheduler=self.scheduler, settings=self.settings)
2853
2854                 self._start_task(packager, self._buildpkg_exit)
2855
2856         def _buildpkg_exit(self, packager):
2857                 """
2858                 Released build dir lock when there is a failure or
2859                 when in buildpkgonly mode. Otherwise, the lock will
2860                 be released when merge() is called.
2861                 """
2862
2863                 if self._default_exit(packager) != os.EX_OK:
2864                         self._unlock_builddir()
2865                         self.wait()
2866                         return
2867
2868                 if self.opts.buildpkgonly:
2869                         # Need to call "clean" phase for buildpkgonly mode
2870                         portage.elog.elog_process(self.pkg.cpv, self.settings)
2871                         phase = "clean"
2872                         clean_phase = EbuildPhase(background=self.background,
2873                                 pkg=self.pkg, phase=phase,
2874                                 scheduler=self.scheduler, settings=self.settings,
2875                                 tree=self._tree)
2876                         self._start_task(clean_phase, self._clean_exit)
2877                         return
2878
2879                 # Continue holding the builddir lock until
2880                 # after the package has been installed.
2881                 self._current_task = None
2882                 self.returncode = packager.returncode
2883                 self.wait()
2884
2885         def _clean_exit(self, clean_phase):
2886                 if self._final_exit(clean_phase) != os.EX_OK or \
2887                         self.opts.buildpkgonly:
2888                         self._unlock_builddir()
2889                 self.wait()
2890
2891         def install(self):
2892                 """
2893                 Install the package and then clean up and release locks.
2894                 Only call this after the build has completed successfully
2895                 and neither fetchonly nor buildpkgonly mode are enabled.
2896                 """
2897
2898                 find_blockers = self.find_blockers
2899                 ldpath_mtimes = self.ldpath_mtimes
2900                 logger = self.logger
2901                 pkg = self.pkg
2902                 pkg_count = self.pkg_count
2903                 settings = self.settings
2904                 world_atom = self.world_atom
2905                 ebuild_path = self._ebuild_path
2906                 tree = self._tree
2907
2908                 merge = EbuildMerge(find_blockers=self.find_blockers,
2909                         ldpath_mtimes=ldpath_mtimes, logger=logger, pkg=pkg,
2910                         pkg_count=pkg_count, pkg_path=ebuild_path,
2911                         scheduler=self.scheduler,
2912                         settings=settings, tree=tree, world_atom=world_atom)
2913
2914                 msg = " === (%s of %s) Merging (%s::%s)" % \
2915                         (pkg_count.curval, pkg_count.maxval,
2916                         pkg.cpv, ebuild_path)
2917                 short_msg = "emerge: (%s of %s) %s Merge" % \
2918                         (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2919                 logger.log(msg, short_msg=short_msg)
2920
2921                 try:
2922                         rval = merge.execute()
2923                 finally:
2924                         self._unlock_builddir()
2925
2926                 return rval
2927
2928 class EbuildExecuter(CompositeTask):
2929
2930         __slots__ = ("pkg", "scheduler", "settings") + ("_tree",)
2931
2932         _phases = ("prepare", "configure", "compile", "test", "install")
2933
2934         _live_eclasses = frozenset([
2935                 "bzr",
2936                 "cvs",
2937                 "darcs",
2938                 "git",
2939                 "mercurial",
2940                 "subversion"
2941         ])
2942
2943         def _start(self):
2944                 self._tree = "porttree"
2945                 pkg = self.pkg
2946                 phase = "clean"
2947                 clean_phase = EbuildPhase(background=self.background, pkg=pkg, phase=phase,
2948                         scheduler=self.scheduler, settings=self.settings, tree=self._tree)
2949                 self._start_task(clean_phase, self._clean_phase_exit)
2950
2951         def _clean_phase_exit(self, clean_phase):
2952
2953                 if self._default_exit(clean_phase) != os.EX_OK:
2954                         self.wait()
2955                         return
2956
2957                 pkg = self.pkg
2958                 scheduler = self.scheduler
2959                 settings = self.settings
2960                 cleanup = 1
2961
2962                 # This initializes PORTAGE_LOG_FILE.
2963                 portage.prepare_build_dirs(pkg.root, settings, cleanup)
2964
2965                 setup_phase = EbuildPhase(background=self.background,
2966                         pkg=pkg, phase="setup", scheduler=scheduler,
2967                         settings=settings, tree=self._tree)
2968
2969                 setup_phase.addExitListener(self._setup_exit)
2970                 self._current_task = setup_phase
2971                 self.scheduler.scheduleSetup(setup_phase)
2972
2973         def _setup_exit(self, setup_phase):
2974
2975                 if self._default_exit(setup_phase) != os.EX_OK:
2976                         self.wait()
2977                         return
2978
2979                 unpack_phase = EbuildPhase(background=self.background,
2980                         pkg=self.pkg, phase="unpack", scheduler=self.scheduler,
2981                         settings=self.settings, tree=self._tree)
2982
2983                 if self._live_eclasses.intersection(self.pkg.inherited):
2984                         # Serialize $DISTDIR access for live ebuilds since
2985                         # otherwise they can interfere with eachother.
2986
2987                         unpack_phase.addExitListener(self._unpack_exit)
2988                         self._current_task = unpack_phase
2989                         self.scheduler.scheduleUnpack(unpack_phase)
2990
2991                 else:
2992                         self._start_task(unpack_phase, self._unpack_exit)
2993
2994         def _unpack_exit(self, unpack_phase):
2995
2996                 if self._default_exit(unpack_phase) != os.EX_OK:
2997                         self.wait()
2998                         return
2999
3000                 ebuild_phases = TaskSequence(scheduler=self.scheduler)
3001
3002                 pkg = self.pkg
3003                 phases = self._phases
3004                 eapi = pkg.metadata["EAPI"]
3005                 if eapi in ("0", "1"):
3006                         # skip src_prepare and src_configure
3007                         phases = phases[2:]
3008
3009                 for phase in phases:
3010                         ebuild_phases.add(EbuildPhase(background=self.background,
3011                                 pkg=self.pkg, phase=phase, scheduler=self.scheduler,
3012                                 settings=self.settings, tree=self._tree))
3013
3014                 self._start_task(ebuild_phases, self._default_final_exit)
3015
3016 class EbuildMetadataPhase(SubProcess):
3017
3018         """
3019         Asynchronous interface for the ebuild "depend" phase which is
3020         used to extract metadata from the ebuild.
3021         """
3022
3023         __slots__ = ("cpv", "ebuild_path", "fd_pipes", "metadata_callback",
3024                 "ebuild_mtime", "portdb", "repo_path", "settings") + \
3025                 ("_raw_metadata",)
3026
3027         _file_names = ("ebuild",)
3028         _files_dict = slot_dict_class(_file_names, prefix="")
3029         _metadata_fd = 9
3030
3031         def _start(self):
3032                 settings = self.settings
3033                 settings.reset()
3034                 ebuild_path = self.ebuild_path
3035                 debug = settings.get("PORTAGE_DEBUG") == "1"
3036                 master_fd = None
3037                 slave_fd = None
3038                 fd_pipes = None
3039                 if self.fd_pipes is not None:
3040                         fd_pipes = self.fd_pipes.copy()
3041                 else:
3042                         fd_pipes = {}
3043
3044                 fd_pipes.setdefault(0, sys.stdin.fileno())
3045                 fd_pipes.setdefault(1, sys.stdout.fileno())
3046                 fd_pipes.setdefault(2, sys.stderr.fileno())
3047
3048                 # flush any pending output
3049                 for fd in fd_pipes.itervalues():
3050                         if fd == sys.stdout.fileno():
3051                                 sys.stdout.flush()
3052                         if fd == sys.stderr.fileno():
3053                                 sys.stderr.flush()
3054
3055                 fd_pipes_orig = fd_pipes.copy()
3056                 self._files = self._files_dict()
3057                 files = self._files
3058
3059                 master_fd, slave_fd = os.pipe()
3060                 fcntl.fcntl(master_fd, fcntl.F_SETFL,
3061                         fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
3062
3063                 fd_pipes[self._metadata_fd] = slave_fd
3064
3065                 self._raw_metadata = []
3066                 files.ebuild = os.fdopen(master_fd, 'r')
3067                 self._reg_id = self.scheduler.register(files.ebuild.fileno(),
3068                         self._registered_events, self._output_handler)
3069                 self._registered = True
3070
3071                 retval = portage.doebuild(ebuild_path, "depend",
3072                         settings["ROOT"], settings, debug,
3073                         mydbapi=self.portdb, tree="porttree",
3074                         fd_pipes=fd_pipes, returnpid=True)
3075
3076                 os.close(slave_fd)
3077
3078                 if isinstance(retval, int):
3079                         # doebuild failed before spawning
3080                         self._unregister()
3081                         self.returncode = retval
3082                         self.wait()
3083                         return
3084
3085                 self.pid = retval[0]
3086                 portage.process.spawned_pids.remove(self.pid)
3087
3088         def _output_handler(self, fd, event):
3089
3090                 if event & PollConstants.POLLIN:
3091                         self._raw_metadata.append(self._files.ebuild.read())
3092                         if not self._raw_metadata[-1]:
3093                                 self._unregister()
3094                                 self.wait()
3095
3096                 self._unregister_if_appropriate(event)
3097                 return self._registered
3098
3099         def _set_returncode(self, wait_retval):
3100                 SubProcess._set_returncode(self, wait_retval)
3101                 if self.returncode == os.EX_OK:
3102                         metadata_lines = "".join(self._raw_metadata).splitlines()
3103                         if len(portage.auxdbkeys) != len(metadata_lines):
3104                                 # Don't trust bash's returncode if the
3105                                 # number of lines is incorrect.
3106                                 self.returncode = 1
3107                         else:
3108                                 metadata = izip(portage.auxdbkeys, metadata_lines)
3109                                 self.metadata_callback(self.cpv, self.ebuild_path,
3110                                         self.repo_path, metadata, self.ebuild_mtime)
3111
3112 class EbuildProcess(SpawnProcess):
3113
3114         __slots__ = ("phase", "pkg", "settings", "tree")
3115
3116         def _start(self):
3117                 # Don't open the log file during the clean phase since the
3118                 # open file can result in an nfs lock on $T/build.log which
3119                 # prevents the clean phase from removing $T.
3120                 if self.phase not in ("clean", "cleanrm"):
3121                         self.logfile = self.settings.get("PORTAGE_LOG_FILE")
3122                 SpawnProcess._start(self)
3123
3124         def _pipe(self, fd_pipes):
3125                 stdout_pipe = fd_pipes.get(1)
3126                 got_pty, master_fd, slave_fd = \
3127                         portage._create_pty_or_pipe(copy_term_size=stdout_pipe)
3128                 return (master_fd, slave_fd)
3129
3130         def _spawn(self, args, **kwargs):
3131
3132                 root_config = self.pkg.root_config
3133                 tree = self.tree
3134                 mydbapi = root_config.trees[tree].dbapi
3135                 settings = self.settings
3136                 ebuild_path = settings["EBUILD"]
3137                 debug = settings.get("PORTAGE_DEBUG") == "1"
3138
3139                 rval = portage.doebuild(ebuild_path, self.phase,
3140                         root_config.root, settings, debug,
3141                         mydbapi=mydbapi, tree=tree, **kwargs)
3142
3143                 return rval
3144
3145         def _set_returncode(self, wait_retval):
3146                 SpawnProcess._set_returncode(self, wait_retval)
3147
3148                 if self.phase not in ("clean", "cleanrm"):
3149                         self.returncode = portage._doebuild_exit_status_check_and_log(
3150                                 self.settings, self.phase, self.returncode)
3151
3152                 if self.phase == "test" and self.returncode != os.EX_OK and \
3153                         "test-fail-continue" in self.settings.features:
3154                         self.returncode = os.EX_OK
3155
3156                 portage._post_phase_userpriv_perms(self.settings)
3157
3158 class EbuildPhase(CompositeTask):
3159
3160         __slots__ = ("background", "pkg", "phase",
3161                 "scheduler", "settings", "tree")
3162
3163         _post_phase_cmds = portage._post_phase_cmds
3164
3165         def _start(self):
3166
3167                 ebuild_process = EbuildProcess(background=self.background,
3168                         pkg=self.pkg, phase=self.phase, scheduler=self.scheduler,
3169                         settings=self.settings, tree=self.tree)
3170
3171                 self._start_task(ebuild_process, self._ebuild_exit)
3172
3173         def _ebuild_exit(self, ebuild_process):
3174
3175                 if self.phase == "install":
3176                         out = None
3177                         log_path = self.settings.get("PORTAGE_LOG_FILE")
3178                         log_file = None
3179                         if self.background and log_path is not None:
3180                                 log_file = open(log_path, 'a')
3181                                 out = log_file
3182                         try:
3183                                 portage._check_build_log(self.settings, out=out)
3184                         finally:
3185                                 if log_file is not None:
3186                                         log_file.close()
3187
3188                 if self._default_exit(ebuild_process) != os.EX_OK:
3189                         self.wait()
3190                         return
3191
3192                 settings = self.settings
3193
3194                 if self.phase == "install":
3195                         portage._post_src_install_uid_fix(settings)
3196
3197                 post_phase_cmds = self._post_phase_cmds.get(self.phase)
3198                 if post_phase_cmds is not None:
3199                         post_phase = MiscFunctionsProcess(background=self.background,
3200                                 commands=post_phase_cmds, phase=self.phase, pkg=self.pkg,
3201                                 scheduler=self.scheduler, settings=settings)
3202                         self._start_task(post_phase, self._post_phase_exit)
3203                         return
3204
3205                 self.returncode = ebuild_process.returncode
3206                 self._current_task = None
3207                 self.wait()
3208
3209         def _post_phase_exit(self, post_phase):
3210                 if self._final_exit(post_phase) != os.EX_OK:
3211                         writemsg("!!! post %s failed; exiting.\n" % self.phase,
3212                                 noiselevel=-1)
3213                 self._current_task = None
3214                 self.wait()
3215                 return
3216
3217 class EbuildBinpkg(EbuildProcess):
3218         """
3219         This assumes that src_install() has successfully completed.
3220         """
3221         __slots__ = ("_binpkg_tmpfile",)
3222
3223         def _start(self):
3224                 self.phase = "package"
3225                 self.tree = "porttree"
3226                 pkg = self.pkg
3227                 root_config = pkg.root_config
3228                 portdb = root_config.trees["porttree"].dbapi
3229                 bintree = root_config.trees["bintree"]
3230                 ebuild_path = portdb.findname(self.pkg.cpv)
3231                 settings = self.settings
3232                 debug = settings.get("PORTAGE_DEBUG") == "1"
3233
3234                 bintree.prevent_collision(pkg.cpv)
3235                 binpkg_tmpfile = os.path.join(bintree.pkgdir,
3236                         pkg.cpv + ".tbz2." + str(os.getpid()))
3237                 self._binpkg_tmpfile = binpkg_tmpfile
3238                 settings["PORTAGE_BINPKG_TMPFILE"] = binpkg_tmpfile
3239                 settings.backup_changes("PORTAGE_BINPKG_TMPFILE")
3240
3241                 try:
3242                         EbuildProcess._start(self)
3243                 finally:
3244                         settings.pop("PORTAGE_BINPKG_TMPFILE", None)
3245
3246         def _set_returncode(self, wait_retval):
3247                 EbuildProcess._set_returncode(self, wait_retval)
3248
3249                 pkg = self.pkg
3250                 bintree = pkg.root_config.trees["bintree"]
3251                 binpkg_tmpfile = self._binpkg_tmpfile
3252                 if self.returncode == os.EX_OK:
3253                         bintree.inject(pkg.cpv, filename=binpkg_tmpfile)
3254
3255 class EbuildMerge(SlotObject):
3256
3257         __slots__ = ("find_blockers", "logger", "ldpath_mtimes",
3258                 "pkg", "pkg_count", "pkg_path", "pretend",
3259                 "scheduler", "settings", "tree", "world_atom")
3260
3261         def execute(self):
3262                 root_config = self.pkg.root_config
3263                 settings = self.settings
3264                 retval = portage.merge(settings["CATEGORY"],
3265                         settings["PF"], settings["D"],
3266                         os.path.join(settings["PORTAGE_BUILDDIR"],
3267                         "build-info"), root_config.root, settings,
3268                         myebuild=settings["EBUILD"],
3269                         mytree=self.tree, mydbapi=root_config.trees[self.tree].dbapi,
3270                         vartree=root_config.trees["vartree"],
3271                         prev_mtimes=self.ldpath_mtimes,
3272                         scheduler=self.scheduler,
3273                         blockers=self.find_blockers)
3274
3275                 if retval == os.EX_OK:
3276                         self.world_atom(self.pkg)
3277                         self._log_success()
3278
3279                 return retval
3280
3281         def _log_success(self):
3282                 pkg = self.pkg
3283                 pkg_count = self.pkg_count
3284                 pkg_path = self.pkg_path
3285                 logger = self.logger
3286                 if "noclean" not in self.settings.features:
3287                         short_msg = "emerge: (%s of %s) %s Clean Post" % \
3288                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3289                         logger.log((" === (%s of %s) " + \
3290                                 "Post-Build Cleaning (%s::%s)") % \
3291                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path),
3292                                 short_msg=short_msg)
3293                 logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
3294                         (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
3295
3296 class PackageUninstall(AsynchronousTask):
3297
3298         __slots__ = ("ldpath_mtimes", "opts", "pkg", "scheduler", "settings")
3299
3300         def _start(self):
3301                 try:
3302                         unmerge(self.pkg.root_config, self.opts, "unmerge",
3303                                 [self.pkg.cpv], self.ldpath_mtimes, clean_world=0,
3304                                 clean_delay=0, raise_on_error=1, scheduler=self.scheduler,
3305                                 writemsg_level=self._writemsg_level)
3306                 except UninstallFailure, e:
3307                         self.returncode = e.status
3308                 else:
3309                         self.returncode = os.EX_OK
3310                 self.wait()
3311
3312         def _writemsg_level(self, msg, level=0, noiselevel=0):
3313
3314                 log_path = self.settings.get("PORTAGE_LOG_FILE")
3315                 background = self.background
3316
3317                 if log_path is None:
3318                         if not (background and level < logging.WARNING):
3319                                 portage.util.writemsg_level(msg,
3320                                         level=level, noiselevel=noiselevel)
3321                 else:
3322                         if not background:
3323                                 portage.util.writemsg_level(msg,
3324                                         level=level, noiselevel=noiselevel)
3325
3326                         f = open(log_path, 'a')
3327                         try:
3328                                 f.write(msg)
3329                         finally:
3330                                 f.close()
3331
3332 class Binpkg(CompositeTask):
3333
3334         __slots__ = ("find_blockers",
3335                 "ldpath_mtimes", "logger", "opts",
3336                 "pkg", "pkg_count", "prefetcher", "settings", "world_atom") + \
3337                 ("_bintree", "_build_dir", "_ebuild_path", "_fetched_pkg",
3338                 "_image_dir", "_infloc", "_pkg_path", "_tree", "_verify")
3339
3340         def _writemsg_level(self, msg, level=0, noiselevel=0):
3341
3342                 if not self.background:
3343                         portage.util.writemsg_level(msg,
3344                                 level=level, noiselevel=noiselevel)
3345
3346                 log_path = self.settings.get("PORTAGE_LOG_FILE")
3347                 if  log_path is not None:
3348                         f = open(log_path, 'a')
3349                         try:
3350                                 f.write(msg)
3351                         finally:
3352                                 f.close()
3353
3354         def _start(self):
3355
3356                 pkg = self.pkg
3357                 settings = self.settings
3358                 settings.setcpv(pkg)
3359                 self._tree = "bintree"
3360                 self._bintree = self.pkg.root_config.trees[self._tree]
3361                 self._verify = not self.opts.pretend
3362
3363                 dir_path = os.path.join(settings["PORTAGE_TMPDIR"],
3364                         "portage", pkg.category, pkg.pf)
3365                 self._build_dir = EbuildBuildDir(dir_path=dir_path,
3366                         pkg=pkg, settings=settings)
3367                 self._image_dir = os.path.join(dir_path, "image")
3368                 self._infloc = os.path.join(dir_path, "build-info")
3369                 self._ebuild_path = os.path.join(self._infloc, pkg.pf + ".ebuild")
3370                 settings["EBUILD"] = self._ebuild_path
3371                 debug = settings.get("PORTAGE_DEBUG") == "1"
3372                 portage.doebuild_environment(self._ebuild_path, "setup",
3373                         settings["ROOT"], settings, debug, 1, self._bintree.dbapi)
3374                 settings.configdict["pkg"]["EMERGE_FROM"] = pkg.type_name
3375
3376                 # The prefetcher has already completed or it
3377                 # could be running now. If it's running now,
3378                 # wait for it to complete since it holds
3379                 # a lock on the file being fetched. The
3380                 # portage.locks functions are only designed
3381                 # to work between separate processes. Since
3382                 # the lock is held by the current process,
3383                 # use the scheduler and fetcher methods to
3384                 # synchronize with the fetcher.
3385                 prefetcher = self.prefetcher
3386                 if prefetcher is None:
3387                         pass
3388                 elif not prefetcher.isAlive():
3389                         prefetcher.cancel()
3390                 elif prefetcher.poll() is None:
3391
3392                         waiting_msg = ("Fetching '%s' " + \
3393                                 "in the background. " + \
3394                                 "To view fetch progress, run `tail -f " + \
3395                                 "/var/log/emerge-fetch.log` in another " + \
3396                                 "terminal.") % prefetcher.pkg_path
3397                         msg_prefix = colorize("GOOD", " * ")
3398                         from textwrap import wrap
3399                         waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
3400                                 for line in wrap(waiting_msg, 65))
3401                         if not self.background:
3402                                 writemsg(waiting_msg, noiselevel=-1)
3403
3404                         self._current_task = prefetcher
3405                         prefetcher.addExitListener(self._prefetch_exit)
3406                         return
3407
3408                 self._prefetch_exit(prefetcher)
3409
3410         def _prefetch_exit(self, prefetcher):
3411
3412                 pkg = self.pkg
3413                 pkg_count = self.pkg_count
3414                 if not (self.opts.pretend or self.opts.fetchonly):
3415                         self._build_dir.lock()
3416                         try:
3417                                 shutil.rmtree(self._build_dir.dir_path)
3418                         except EnvironmentError, e:
3419                                 if e.errno != errno.ENOENT:
3420                                         raise
3421                                 del e
3422                         portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
3423                 fetcher = BinpkgFetcher(background=self.background,
3424                         logfile=self.settings.get("PORTAGE_LOG_FILE"), pkg=self.pkg,
3425                         pretend=self.opts.pretend, scheduler=self.scheduler)
3426                 pkg_path = fetcher.pkg_path
3427                 self._pkg_path = pkg_path
3428
3429                 if self.opts.getbinpkg and self._bintree.isremote(pkg.cpv):
3430
3431                         msg = " --- (%s of %s) Fetching Binary (%s::%s)" %\
3432                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
3433                         short_msg = "emerge: (%s of %s) %s Fetch" % \
3434                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3435                         self.logger.log(msg, short_msg=short_msg)
3436                         self._start_task(fetcher, self._fetcher_exit)
3437                         return
3438
3439                 self._fetcher_exit(fetcher)
3440
3441         def _fetcher_exit(self, fetcher):
3442
3443                 # The fetcher only has a returncode when
3444                 # --getbinpkg is enabled.
3445                 if fetcher.returncode is not None:
3446                         self._fetched_pkg = True
3447                         if self._default_exit(fetcher) != os.EX_OK:
3448                                 self._unlock_builddir()
3449                                 self.wait()
3450                                 return
3451
3452                 if self.opts.pretend:
3453                         self._current_task = None
3454                         self.returncode = os.EX_OK
3455                         self.wait()
3456                         return
3457
3458                 verifier = None
3459                 if self._verify:
3460                         logfile = None
3461                         if self.background:
3462                                 logfile = self.settings.get("PORTAGE_LOG_FILE")
3463                         verifier = BinpkgVerifier(background=self.background,
3464                                 logfile=logfile, pkg=self.pkg)
3465                         self._start_task(verifier, self._verifier_exit)
3466                         return
3467
3468                 self._verifier_exit(verifier)
3469
3470         def _verifier_exit(self, verifier):
3471                 if verifier is not None and \
3472                         self._default_exit(verifier) != os.EX_OK:
3473                         self._unlock_builddir()
3474                         self.wait()
3475                         return
3476
3477                 logger = self.logger
3478                 pkg = self.pkg
3479                 pkg_count = self.pkg_count
3480                 pkg_path = self._pkg_path
3481
3482                 if self._fetched_pkg:
3483                         self._bintree.inject(pkg.cpv, filename=pkg_path)
3484
3485                 if self.opts.fetchonly:
3486                         self._current_task = None
3487                         self.returncode = os.EX_OK
3488                         self.wait()
3489                         return
3490
3491                 msg = " === (%s of %s) Merging Binary (%s::%s)" % \
3492                         (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
3493                 short_msg = "emerge: (%s of %s) %s Merge Binary" % \
3494                         (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3495                 logger.log(msg, short_msg=short_msg)
3496
3497                 phase = "clean"
3498                 settings = self.settings
3499                 ebuild_phase = EbuildPhase(background=self.background,
3500                         pkg=pkg, phase=phase, scheduler=self.scheduler,
3501                         settings=settings, tree=self._tree)
3502
3503                 self._start_task(ebuild_phase, self._clean_exit)
3504
3505         def _clean_exit(self, clean_phase):
3506                 if self._default_exit(clean_phase) != os.EX_OK:
3507                         self._unlock_builddir()
3508                         self.wait()
3509                         return
3510
3511                 dir_path = self._build_dir.dir_path
3512
3513                 infloc = self._infloc
3514                 pkg = self.pkg
3515                 pkg_path = self._pkg_path
3516
3517                 dir_mode = 0755
3518                 for mydir in (dir_path, self._image_dir, infloc):
3519                         portage.util.ensure_dirs(mydir, uid=portage.data.portage_uid,
3520                                 gid=portage.data.portage_gid, mode=dir_mode)
3521
3522                 # This initializes PORTAGE_LOG_FILE.
3523                 portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
3524                 self._writemsg_level(">>> Extracting info\n")
3525
3526                 pkg_xpak = portage.xpak.tbz2(self._pkg_path)
3527                 check_missing_metadata = ("CATEGORY", "PF")
3528                 missing_metadata = set()
3529                 for k in check_missing_metadata:
3530                         v = pkg_xpak.getfile(k)
3531                         if not v:
3532                                 missing_metadata.add(k)
3533
3534                 pkg_xpak.unpackinfo(infloc)
3535                 for k in missing_metadata:
3536                         if k == "CATEGORY":
3537                                 v = pkg.category
3538                         elif k == "PF":
3539                                 v = pkg.pf
3540                         else:
3541                                 continue
3542
3543                         f = open(os.path.join(infloc, k), 'wb')
3544                         try:
3545                                 f.write(v + "\n")
3546                         finally:
3547                                 f.close()
3548
3549                 # Store the md5sum in the vdb.
3550                 f = open(os.path.join(infloc, "BINPKGMD5"), "w")
3551                 try:
3552                         f.write(str(portage.checksum.perform_md5(pkg_path)) + "\n")
3553                 finally:
3554                         f.close()
3555
3556                 # This gives bashrc users an opportunity to do various things
3557                 # such as remove binary packages after they're installed.
3558                 settings = self.settings
3559                 settings.setcpv(self.pkg)
3560                 settings["PORTAGE_BINPKG_FILE"] = pkg_path
3561                 settings.backup_changes("PORTAGE_BINPKG_FILE")
3562
3563                 phase = "setup"
3564                 setup_phase = EbuildPhase(background=self.background,
3565                         pkg=self.pkg, phase=phase, scheduler=self.scheduler,
3566                         settings=settings, tree=self._tree)
3567
3568                 setup_phase.addExitListener(self._setup_exit)
3569                 self._current_task = setup_phase
3570                 self.scheduler.scheduleSetup(setup_phase)
3571
3572         def _setup_exit(self, setup_phase):
3573                 if self._default_exit(setup_phase) != os.EX_OK:
3574                         self._unlock_builddir()
3575                         self.wait()
3576                         return
3577
3578                 extractor = BinpkgExtractorAsync(background=self.background,
3579                         image_dir=self._image_dir,
3580                         pkg=self.pkg, pkg_path=self._pkg_path, scheduler=self.scheduler)
3581                 self._writemsg_level(">>> Extracting %s\n" % self.pkg.cpv)
3582                 self._start_task(extractor, self._extractor_exit)
3583
3584         def _extractor_exit(self, extractor):
3585                 if self._final_exit(extractor) != os.EX_OK:
3586                         self._unlock_builddir()
3587                         writemsg("!!! Error Extracting '%s'\n" % self._pkg_path,
3588                                 noiselevel=-1)
3589                 self.wait()
3590
3591         def _unlock_builddir(self):
3592                 if self.opts.pretend or self.opts.fetchonly:
3593                         return
3594                 portage.elog.elog_process(self.pkg.cpv, self.settings)
3595                 self._build_dir.unlock()
3596
3597         def install(self):
3598
3599                 # This gives bashrc users an opportunity to do various things
3600                 # such as remove binary packages after they're installed.
3601                 settings = self.settings
3602                 settings["PORTAGE_BINPKG_FILE"] = self._pkg_path
3603                 settings.backup_changes("PORTAGE_BINPKG_FILE")
3604
3605                 merge = EbuildMerge(find_blockers=self.find_blockers,
3606                         ldpath_mtimes=self.ldpath_mtimes, logger=self.logger,
3607                         pkg=self.pkg, pkg_count=self.pkg_count,
3608                         pkg_path=self._pkg_path, scheduler=self.scheduler,
3609                         settings=settings, tree=self._tree, world_atom=self.world_atom)
3610
3611                 try:
3612                         retval = merge.execute()
3613                 finally:
3614                         settings.pop("PORTAGE_BINPKG_FILE", None)
3615                         self._unlock_builddir()
3616                 return retval
3617
3618 class BinpkgFetcher(SpawnProcess):
3619
3620         __slots__ = ("pkg", "pretend",
3621                 "locked", "pkg_path", "_lock_obj")
3622
3623         def __init__(self, **kwargs):
3624                 SpawnProcess.__init__(self, **kwargs)
3625                 pkg = self.pkg
3626                 self.pkg_path = pkg.root_config.trees["bintree"].getname(pkg.cpv)
3627
3628         def _start(self):
3629
3630                 if self.cancelled:
3631                         return
3632
3633                 pkg = self.pkg
3634                 pretend = self.pretend
3635                 bintree = pkg.root_config.trees["bintree"]
3636                 settings = bintree.settings
3637                 use_locks = "distlocks" in settings.features
3638                 pkg_path = self.pkg_path
3639
3640                 if not pretend:
3641                         portage.util.ensure_dirs(os.path.dirname(pkg_path))
3642                         if use_locks:
3643                                 self.lock()
3644                 exists = os.path.exists(pkg_path)
3645                 resume = exists and os.path.basename(pkg_path) in bintree.invalids
3646                 if not (pretend or resume):
3647                         # Remove existing file or broken symlink.
3648                         try:
3649                                 os.unlink(pkg_path)
3650                         except OSError:
3651                                 pass
3652
3653                 # urljoin doesn't work correctly with
3654                 # unrecognized protocols like sftp
3655                 if bintree._remote_has_index:
3656                         rel_uri = bintree._remotepkgs[pkg.cpv].get("PATH")
3657                         if not rel_uri:
3658                                 rel_uri = pkg.cpv + ".tbz2"
3659                         uri = bintree._remote_base_uri.rstrip("/") + \
3660                                 "/" + rel_uri.lstrip("/")
3661                 else:
3662                         uri = settings["PORTAGE_BINHOST"].rstrip("/") + \
3663                                 "/" + pkg.pf + ".tbz2"
3664
3665                 if pretend:
3666                         portage.writemsg_stdout("\n%s\n" % uri, noiselevel=-1)
3667                         self.returncode = os.EX_OK
3668                         self.wait()
3669                         return
3670
3671                 protocol = urlparse.urlparse(uri)[0]
3672                 fcmd_prefix = "FETCHCOMMAND"
3673                 if resume:
3674                         fcmd_prefix = "RESUMECOMMAND"
3675                 fcmd = settings.get(fcmd_prefix + "_" + protocol.upper())
3676                 if not fcmd:
3677                         fcmd = settings.get(fcmd_prefix)
3678
3679                 fcmd_vars = {
3680                         "DISTDIR" : os.path.dirname(pkg_path),
3681                         "URI"     : uri,
3682                         "FILE"    : os.path.basename(pkg_path)
3683                 }
3684
3685                 fetch_env = dict(settings.iteritems())
3686                 fetch_args = [portage.util.varexpand(x, mydict=fcmd_vars) \
3687                         for x in shlex.split(fcmd)]
3688
3689                 if self.fd_pipes is None:
3690                         self.fd_pipes = {}
3691                 fd_pipes = self.fd_pipes
3692
3693                 # Redirect all output to stdout since some fetchers like
3694                 # wget pollute stderr (if portage detects a problem then it
3695                 # can send it's own message to stderr).
3696                 fd_pipes.setdefault(0, sys.stdin.fileno())
3697                 fd_pipes.setdefault(1, sys.stdout.fileno())
3698                 fd_pipes.setdefault(2, sys.stdout.fileno())
3699
3700                 self.args = fetch_args
3701                 self.env = fetch_env
3702                 SpawnProcess._start(self)
3703
3704         def _set_returncode(self, wait_retval):
3705                 SpawnProcess._set_returncode(self, wait_retval)
3706                 if self.returncode == os.EX_OK:
3707                         # If possible, update the mtime to match the remote package if
3708                         # the fetcher didn't already do it automatically.
3709                         bintree = self.pkg.root_config.trees["bintree"]
3710                         if bintree._remote_has_index:
3711                                 remote_mtime = bintree._remotepkgs[self.pkg.cpv].get("MTIME")
3712                                 if remote_mtime is not None:
3713                                         try:
3714                                                 remote_mtime = long(remote_mtime)
3715                                         except ValueError:
3716                                                 pass
3717                                         else:
3718                                                 try:
3719                                                         local_mtime = long(os.stat(self.pkg_path).st_mtime)
3720                                                 except OSError:
3721                                                         pass
3722                                                 else:
3723                                                         if remote_mtime != local_mtime:
3724                                                                 try:
3725                                                                         os.utime(self.pkg_path,
3726                                                                                 (remote_mtime, remote_mtime))
3727                                                                 except OSError:
3728                                                                         pass
3729
3730                 if self.locked:
3731                         self.unlock()
3732
3733         def lock(self):
3734                 """
3735                 This raises an AlreadyLocked exception if lock() is called
3736                 while a lock is already held. In order to avoid this, call
3737                 unlock() or check whether the "locked" attribute is True
3738                 or False before calling lock().
3739                 """
3740                 if self._lock_obj is not None:
3741                         raise self.AlreadyLocked((self._lock_obj,))
3742
3743                 self._lock_obj = portage.locks.lockfile(
3744                         self.pkg_path, wantnewlockfile=1)
3745                 self.locked = True
3746
3747         class AlreadyLocked(portage.exception.PortageException):
3748                 pass
3749
3750         def unlock(self):
3751                 if self._lock_obj is None:
3752                         return
3753                 portage.locks.unlockfile(self._lock_obj)
3754                 self._lock_obj = None
3755                 self.locked = False
3756
3757 class BinpkgVerifier(AsynchronousTask):
3758         __slots__ = ("logfile", "pkg",)
3759
3760         def _start(self):
3761                 """
3762                 Note: Unlike a normal AsynchronousTask.start() method,
3763                 this one does all work is synchronously. The returncode
3764                 attribute will be set before it returns.
3765                 """
3766
3767                 pkg = self.pkg
3768                 root_config = pkg.root_config
3769                 bintree = root_config.trees["bintree"]
3770                 rval = os.EX_OK
3771                 stdout_orig = sys.stdout
3772                 stderr_orig = sys.stderr
3773                 log_file = None
3774                 if self.background and self.logfile is not None:
3775                         log_file = open(self.logfile, 'a')
3776                 try:
3777                         if log_file is not None:
3778                                 sys.stdout = log_file
3779                                 sys.stderr = log_file
3780                         try:
3781                                 bintree.digestCheck(pkg)
3782                         except portage.exception.FileNotFound:
3783                                 writemsg("!!! Fetching Binary failed " + \
3784                                         "for '%s'\n" % pkg.cpv, noiselevel=-1)
3785                                 rval = 1
3786                         except portage.exception.DigestException, e:
3787                                 writemsg("\n!!! Digest verification failed:\n",
3788                                         noiselevel=-1)
3789                                 writemsg("!!! %s\n" % e.value[0],
3790                                         noiselevel=-1)
3791                                 writemsg("!!! Reason: %s\n" % e.value[1],
3792                                         noiselevel=-1)
3793                                 writemsg("!!! Got: %s\n" % e.value[2],
3794                                         noiselevel=-1)
3795                                 writemsg("!!! Expected: %s\n" % e.value[3],
3796                                         noiselevel=-1)
3797                                 rval = 1
3798                         if rval != os.EX_OK:
3799                                 pkg_path = bintree.getname(pkg.cpv)
3800                                 head, tail = os.path.split(pkg_path)
3801                                 temp_filename = portage._checksum_failure_temp_file(head, tail)
3802                                 writemsg("File renamed to '%s'\n" % (temp_filename,),
3803                                         noiselevel=-1)
3804                 finally:
3805                         sys.stdout = stdout_orig
3806                         sys.stderr = stderr_orig
3807                         if log_file is not None:
3808                                 log_file.close()
3809
3810                 self.returncode = rval
3811                 self.wait()
3812
3813 class BinpkgPrefetcher(CompositeTask):
3814
3815         __slots__ = ("pkg",) + \
3816                 ("pkg_path", "_bintree",)
3817
3818         def _start(self):
3819                 self._bintree = self.pkg.root_config.trees["bintree"]
3820                 fetcher = BinpkgFetcher(background=self.background,
3821                         logfile=self.scheduler.fetch.log_file, pkg=self.pkg,
3822                         scheduler=self.scheduler)
3823                 self.pkg_path = fetcher.pkg_path
3824                 self._start_task(fetcher, self._fetcher_exit)
3825
3826         def _fetcher_exit(self, fetcher):
3827
3828                 if self._default_exit(fetcher) != os.EX_OK:
3829                         self.wait()
3830                         return
3831
3832                 verifier = BinpkgVerifier(background=self.background,
3833                         logfile=self.scheduler.fetch.log_file, pkg=self.pkg)
3834                 self._start_task(verifier, self._verifier_exit)
3835
3836         def _verifier_exit(self, verifier):
3837                 if self._default_exit(verifier) != os.EX_OK:
3838                         self.wait()
3839                         return
3840
3841                 self._bintree.inject(self.pkg.cpv, filename=self.pkg_path)
3842
3843                 self._current_task = None
3844                 self.returncode = os.EX_OK
3845                 self.wait()
3846
3847 class BinpkgExtractorAsync(SpawnProcess):
3848
3849         __slots__ = ("image_dir", "pkg", "pkg_path")
3850
3851         _shell_binary = portage.const.BASH_BINARY
3852
3853         def _start(self):
3854                 self.args = [self._shell_binary, "-c",
3855                         "bzip2 -dqc -- %s | tar -xp -C %s -f -" % \
3856                         (portage._shell_quote(self.pkg_path),
3857                         portage._shell_quote(self.image_dir))]
3858
3859                 self.env = self.pkg.root_config.settings.environ()
3860                 SpawnProcess._start(self)
3861
3862 class MergeListItem(CompositeTask):
3863
3864         """
3865         TODO: For parallel scheduling, everything here needs asynchronous
3866         execution support (start, poll, and wait methods).
3867         """
3868
3869         __slots__ = ("args_set",
3870                 "binpkg_opts", "build_opts", "config_pool", "emerge_opts",
3871                 "find_blockers", "logger", "mtimedb", "pkg",
3872                 "pkg_count", "pkg_to_replace", "prefetcher",
3873                 "settings", "statusMessage", "world_atom") + \
3874                 ("_install_task",)
3875
3876         def _start(self):
3877
3878                 pkg = self.pkg
3879                 build_opts = self.build_opts
3880
3881                 if pkg.installed:
3882                         # uninstall,  executed by self.merge()
3883                         self.returncode = os.EX_OK
3884                         self.wait()
3885                         return
3886
3887                 args_set = self.args_set
3888                 find_blockers = self.find_blockers
3889                 logger = self.logger
3890                 mtimedb = self.mtimedb
3891                 pkg_count = self.pkg_count
3892                 scheduler = self.scheduler
3893                 settings = self.settings
3894                 world_atom = self.world_atom
3895                 ldpath_mtimes = mtimedb["ldpath"]
3896
3897                 action_desc = "Emerging"
3898                 preposition = "for"
3899                 if pkg.type_name == "binary":
3900                         action_desc += " binary"
3901
3902                 if build_opts.fetchonly:
3903                         action_desc = "Fetching"
3904
3905                 msg = "%s (%s of %s) %s" % \
3906                         (action_desc,
3907                         colorize("MERGE_LIST_PROGRESS", str(pkg_count.curval)),
3908                         colorize("MERGE_LIST_PROGRESS", str(pkg_count.maxval)),
3909                         colorize("GOOD", pkg.cpv))
3910
3911                 portdb = pkg.root_config.trees["porttree"].dbapi
3912                 portdir_repo_name = portdb._repository_map.get(portdb.porttree_root)
3913                 if portdir_repo_name:
3914                         pkg_repo_name = pkg.metadata.get("repository")
3915                         if pkg_repo_name != portdir_repo_name:
3916                                 if not pkg_repo_name:
3917                                         pkg_repo_name = "unknown repo"
3918                                 msg += " from %s" % pkg_repo_name
3919
3920                 if pkg.root != "/":
3921                         msg += " %s %s" % (preposition, pkg.root)
3922
3923                 if not build_opts.pretend:
3924                         self.statusMessage(msg)
3925                         logger.log(" >>> emerge (%s of %s) %s to %s" % \
3926                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
3927
3928                 if pkg.type_name == "ebuild":
3929
3930                         build = EbuildBuild(args_set=args_set,
3931                                 background=self.background,
3932                                 config_pool=self.config_pool,
3933                                 find_blockers=find_blockers,
3934                                 ldpath_mtimes=ldpath_mtimes, logger=logger,
3935                                 opts=build_opts, pkg=pkg, pkg_count=pkg_count,
3936                                 prefetcher=self.prefetcher, scheduler=scheduler,
3937                                 settings=settings, world_atom=world_atom)
3938
3939                         self._install_task = build
3940                         self._start_task(build, self._default_final_exit)
3941                         return
3942
3943                 elif pkg.type_name == "binary":
3944
3945                         binpkg = Binpkg(background=self.background,
3946                                 find_blockers=find_blockers,
3947                                 ldpath_mtimes=ldpath_mtimes, logger=logger,
3948                                 opts=self.binpkg_opts, pkg=pkg, pkg_count=pkg_count,
3949                                 prefetcher=self.prefetcher, settings=settings,
3950                                 scheduler=scheduler, world_atom=world_atom)
3951
3952                         self._install_task = binpkg
3953                         self._start_task(binpkg, self._default_final_exit)
3954                         return
3955
3956         def _poll(self):
3957                 self._install_task.poll()
3958                 return self.returncode
3959
3960         def _wait(self):
3961                 self._install_task.wait()
3962                 return self.returncode
3963
3964         def merge(self):
3965
3966                 pkg = self.pkg
3967                 build_opts = self.build_opts
3968                 find_blockers = self.find_blockers
3969                 logger = self.logger
3970                 mtimedb = self.mtimedb
3971                 pkg_count = self.pkg_count
3972                 prefetcher = self.prefetcher
3973                 scheduler = self.scheduler
3974                 settings = self.settings
3975                 world_atom = self.world_atom
3976                 ldpath_mtimes = mtimedb["ldpath"]
3977
3978                 if pkg.installed:
3979                         if not (build_opts.buildpkgonly or \
3980                                 build_opts.fetchonly or build_opts.pretend):
3981
3982                                 uninstall = PackageUninstall(background=self.background,
3983                                         ldpath_mtimes=ldpath_mtimes, opts=self.emerge_opts,
3984                                         pkg=pkg, scheduler=scheduler, settings=settings)
3985
3986                                 uninstall.start()
3987                                 retval = uninstall.wait()
3988                                 if retval != os.EX_OK:
3989                                         return retval
3990                         return os.EX_OK
3991
3992                 if build_opts.fetchonly or \
3993                         build_opts.buildpkgonly:
3994                         return self.returncode
3995
3996                 retval = self._install_task.install()
3997                 return retval
3998
3999 class PackageMerge(AsynchronousTask):
4000         """
4001         TODO: Implement asynchronous merge so that the scheduler can
4002         run while a merge is executing.
4003         """
4004
4005         __slots__ = ("merge",)
4006
4007         def _start(self):
4008
4009                 pkg = self.merge.pkg
4010                 pkg_count = self.merge.pkg_count
4011
4012                 if pkg.installed:
4013                         action_desc = "Uninstalling"
4014                         preposition = "from"
4015                 else:
4016                         action_desc = "Installing"
4017                         preposition = "to"
4018
4019                 msg = "%s %s" % (action_desc, colorize("GOOD", pkg.cpv))
4020
4021                 if pkg.root != "/":
4022                         msg += " %s %s" % (preposition, pkg.root)
4023
4024                 if not self.merge.build_opts.fetchonly and \
4025                         not self.merge.build_opts.pretend and \
4026                         not self.merge.build_opts.buildpkgonly:
4027                         self.merge.statusMessage(msg)
4028
4029                 self.returncode = self.merge.merge()
4030                 self.wait()
4031
4032 class DependencyArg(object):
4033         def __init__(self, arg=None, root_config=None):
4034                 self.arg = arg
4035                 self.root_config = root_config
4036
4037         def __str__(self):
4038                 return str(self.arg)
4039
4040 class AtomArg(DependencyArg):
4041         def __init__(self, atom=None, **kwargs):
4042                 DependencyArg.__init__(self, **kwargs)
4043                 self.atom = atom
4044                 if not isinstance(self.atom, portage.dep.Atom):
4045                         self.atom = portage.dep.Atom(self.atom)
4046                 self.set = (self.atom, )
4047
4048 class PackageArg(DependencyArg):
4049         def __init__(self, package=None, **kwargs):
4050                 DependencyArg.__init__(self, **kwargs)
4051                 self.package = package
4052                 self.atom = portage.dep.Atom("=" + package.cpv)
4053                 self.set = (self.atom, )
4054
4055 class SetArg(DependencyArg):
4056         def __init__(self, set=None, **kwargs):
4057                 DependencyArg.__init__(self, **kwargs)
4058                 self.set = set
4059                 self.name = self.arg[len(SETPREFIX):]
4060
4061 class Dependency(SlotObject):
4062         __slots__ = ("atom", "blocker", "depth",
4063                 "parent", "onlydeps", "priority", "root")
4064         def __init__(self, **kwargs):
4065                 SlotObject.__init__(self, **kwargs)
4066                 if self.priority is None:
4067                         self.priority = DepPriority()
4068                 if self.depth is None:
4069                         self.depth = 0
4070
4071 class BlockerCache(portage.cache.mappings.MutableMapping):
4072         """This caches blockers of installed packages so that dep_check does not
4073         have to be done for every single installed package on every invocation of
4074         emerge.  The cache is invalidated whenever it is detected that something
4075         has changed that might alter the results of dep_check() calls:
4076                 1) the set of installed packages (including COUNTER) has changed
4077                 2) the old-style virtuals have changed
4078         """
4079
4080         # Number of uncached packages to trigger cache update, since
4081         # it's wasteful to update it for every vdb change.
4082         _cache_threshold = 5
4083
4084         class BlockerData(object):
4085
4086                 __slots__ = ("__weakref__", "atoms", "counter")
4087
4088                 def __init__(self, counter, atoms):
4089                         self.counter = counter
4090                         self.atoms = atoms
4091
4092         def __init__(self, myroot, vardb):
4093                 self._vardb = vardb
4094                 self._virtuals = vardb.settings.getvirtuals()
4095                 self._cache_filename = os.path.join(myroot,
4096                         portage.CACHE_PATH.lstrip(os.path.sep), "vdb_blockers.pickle")
4097                 self._cache_version = "1"
4098                 self._cache_data = None
4099                 self._modified = set()
4100                 self._load()
4101
4102         def _load(self):
4103                 try:
4104                         f = open(self._cache_filename, mode='rb')
4105                         mypickle = pickle.Unpickler(f)
4106                         try:
4107                                 mypickle.find_global = None
4108                         except AttributeError:
4109                                 # TODO: If py3k, override Unpickler.find_class().
4110                                 pass
4111                         self._cache_data = mypickle.load()
4112                         f.close()
4113                         del f
4114                 except (IOError, OSError, EOFError, ValueError, pickle.UnpicklingError), e:
4115                         if isinstance(e, pickle.UnpicklingError):
4116                                 writemsg("!!! Error loading '%s': %s\n" % \
4117                                         (self._cache_filename, str(e)), noiselevel=-1)
4118                         del e
4119
4120                 cache_valid = self._cache_data and \
4121                         isinstance(self._cache_data, dict) and \
4122                         self._cache_data.get("version") == self._cache_version and \
4123                         isinstance(self._cache_data.get("blockers"), dict)
4124                 if cache_valid:
4125                         # Validate all the atoms and counters so that
4126                         # corruption is detected as soon as possible.
4127                         invalid_items = set()
4128                         for k, v in self._cache_data["blockers"].iteritems():
4129                                 if not isinstance(k, basestring):
4130                                         invalid_items.add(k)
4131                                         continue
4132                                 try:
4133                                         if portage.catpkgsplit(k) is None:
4134                                                 invalid_items.add(k)
4135                                                 continue
4136                                 except portage.exception.InvalidData:
4137                                         invalid_items.add(k)
4138                                         continue
4139                                 if not isinstance(v, tuple) or \
4140                                         len(v) != 2:
4141                                         invalid_items.add(k)
4142                                         continue
4143                                 counter, atoms = v
4144                                 if not isinstance(counter, (int, long)):
4145                                         invalid_items.add(k)
4146                                         continue
4147                                 if not isinstance(atoms, (list, tuple)):
4148                                         invalid_items.add(k)
4149                                         continue
4150                                 invalid_atom = False
4151                                 for atom in atoms:
4152                                         if not isinstance(atom, basestring):
4153                                                 invalid_atom = True
4154                                                 break
4155                                         if atom[:1] != "!" or \
4156                                                 not portage.isvalidatom(
4157                                                 atom, allow_blockers=True):
4158                                                 invalid_atom = True
4159                                                 break
4160                                 if invalid_atom:
4161                                         invalid_items.add(k)
4162                                         continue
4163
4164                         for k in invalid_items:
4165                                 del self._cache_data["blockers"][k]
4166                         if not self._cache_data["blockers"]:
4167                                 cache_valid = False
4168
4169                 if not cache_valid:
4170                         self._cache_data = {"version":self._cache_version}
4171                         self._cache_data["blockers"] = {}
4172                         self._cache_data["virtuals"] = self._virtuals
4173                 self._modified.clear()
4174
4175         def flush(self):
4176                 """If the current user has permission and the internal blocker cache
4177                 been updated, save it to disk and mark it unmodified.  This is called
4178                 by emerge after it has proccessed blockers for all installed packages.
4179                 Currently, the cache is only written if the user has superuser
4180                 privileges (since that's required to obtain a lock), but all users
4181                 have read access and benefit from faster blocker lookups (as long as
4182                 the entire cache is still valid).  The cache is stored as a pickled
4183                 dict object with the following format:
4184
4185                 {
4186                         version : "1",
4187                         "blockers" : {cpv1:(counter,(atom1, atom2...)), cpv2...},
4188                         "virtuals" : vardb.settings.getvirtuals()
4189                 }
4190                 """
4191                 if len(self._modified) >= self._cache_threshold and \
4192                         secpass >= 2:
4193                         try:
4194                                 f = portage.util.atomic_ofstream(self._cache_filename, mode='wb')
4195                                 pickle.dump(self._cache_data, f, protocol=2)
4196                                 f.close()
4197                                 portage.util.apply_secpass_permissions(
4198                                         self._cache_filename, gid=portage.portage_gid, mode=0644)
4199                         except (IOError, OSError), e:
4200                                 pass
4201                         self._modified.clear()
4202
4203         def __setitem__(self, cpv, blocker_data):
4204                 """
4205                 Update the cache and mark it as modified for a future call to
4206                 self.flush().
4207
4208                 @param cpv: Package for which to cache blockers.
4209                 @type cpv: String
4210                 @param blocker_data: An object with counter and atoms attributes.
4211                 @type blocker_data: BlockerData
4212                 """
4213                 self._cache_data["blockers"][cpv] = \
4214                         (blocker_data.counter, tuple(str(x) for x in blocker_data.atoms))
4215                 self._modified.add(cpv)
4216
4217         def __iter__(self):
4218                 if self._cache_data is None:
4219                         # triggered by python-trace
4220                         return iter([])
4221                 return iter(self._cache_data["blockers"])
4222
4223         def __delitem__(self, cpv):
4224                 del self._cache_data["blockers"][cpv]
4225
4226         def __getitem__(self, cpv):
4227                 """
4228                 @rtype: BlockerData
4229                 @returns: An object with counter and atoms attributes.
4230                 """
4231                 return self.BlockerData(*self._cache_data["blockers"][cpv])
4232
4233 class BlockerDB(object):
4234
4235         def __init__(self, root_config):
4236                 self._root_config = root_config
4237                 self._vartree = root_config.trees["vartree"]
4238                 self._portdb = root_config.trees["porttree"].dbapi
4239
4240                 self._dep_check_trees = None
4241                 self._fake_vartree = None
4242
4243         def _get_fake_vartree(self, acquire_lock=0):
4244                 fake_vartree = self._fake_vartree
4245                 if fake_vartree is None:
4246                         fake_vartree = FakeVartree(self._root_config,
4247                                 acquire_lock=acquire_lock)
4248                         self._fake_vartree = fake_vartree
4249                         self._dep_check_trees = { self._vartree.root : {
4250                                 "porttree"    :  fake_vartree,
4251                                 "vartree"     :  fake_vartree,
4252                         }}
4253                 else:
4254                         fake_vartree.sync(acquire_lock=acquire_lock)
4255                 return fake_vartree
4256
4257         def findInstalledBlockers(self, new_pkg, acquire_lock=0):
4258                 blocker_cache = BlockerCache(self._vartree.root, self._vartree.dbapi)
4259                 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
4260                 settings = self._vartree.settings
4261                 stale_cache = set(blocker_cache)
4262                 fake_vartree = self._get_fake_vartree(acquire_lock=acquire_lock)
4263                 dep_check_trees = self._dep_check_trees
4264                 vardb = fake_vartree.dbapi
4265                 installed_pkgs = list(vardb)
4266
4267                 for inst_pkg in installed_pkgs:
4268                         stale_cache.discard(inst_pkg.cpv)
4269                         cached_blockers = blocker_cache.get(inst_pkg.cpv)
4270                         if cached_blockers is not None and \
4271                                 cached_blockers.counter != long(inst_pkg.metadata["COUNTER"]):
4272                                 cached_blockers = None
4273                         if cached_blockers is not None:
4274                                 blocker_atoms = cached_blockers.atoms
4275                         else:
4276                                 # Use aux_get() to trigger FakeVartree global
4277                                 # updates on *DEPEND when appropriate.
4278                                 depstr = " ".join(vardb.aux_get(inst_pkg.cpv, dep_keys))
4279                                 try:
4280                                         portage.dep._dep_check_strict = False
4281                                         success, atoms = portage.dep_check(depstr,
4282                                                 vardb, settings, myuse=inst_pkg.use.enabled,
4283                                                 trees=dep_check_trees, myroot=inst_pkg.root)
4284                                 finally:
4285                                         portage.dep._dep_check_strict = True
4286                                 if not success:
4287                                         pkg_location = os.path.join(inst_pkg.root,
4288                                                 portage.VDB_PATH, inst_pkg.category, inst_pkg.pf)
4289                                         portage.writemsg("!!! %s/*DEPEND: %s\n" % \
4290                                                 (pkg_location, atoms), noiselevel=-1)
4291                                         continue
4292
4293                                 blocker_atoms = [atom for atom in atoms \
4294                                         if atom.startswith("!")]
4295                                 blocker_atoms.sort()
4296                                 counter = long(inst_pkg.metadata["COUNTER"])
4297                                 blocker_cache[inst_pkg.cpv] = \
4298                                         blocker_cache.BlockerData(counter, blocker_atoms)
4299                 for cpv in stale_cache:
4300                         del blocker_cache[cpv]
4301                 blocker_cache.flush()
4302
4303                 blocker_parents = digraph()
4304                 blocker_atoms = []
4305                 for pkg in installed_pkgs:
4306                         for blocker_atom in blocker_cache[pkg.cpv].atoms:
4307                                 blocker_atom = blocker_atom.lstrip("!")
4308                                 blocker_atoms.append(blocker_atom)
4309                                 blocker_parents.add(blocker_atom, pkg)
4310
4311                 blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
4312                 blocking_pkgs = set()
4313                 for atom in blocker_atoms.iterAtomsForPackage(new_pkg):
4314                         blocking_pkgs.update(blocker_parents.parent_nodes(atom))
4315
4316                 # Check for blockers in the other direction.
4317                 depstr = " ".join(new_pkg.metadata[k] for k in dep_keys)
4318                 try:
4319                         portage.dep._dep_check_strict = False
4320                         success, atoms = portage.dep_check(depstr,
4321                                 vardb, settings, myuse=new_pkg.use.enabled,
4322                                 trees=dep_check_trees, myroot=new_pkg.root)
4323                 finally:
4324                         portage.dep._dep_check_strict = True
4325                 if not success:
4326                         # We should never get this far with invalid deps.
4327                         show_invalid_depstring_notice(new_pkg, depstr, atoms)
4328                         assert False
4329
4330                 blocker_atoms = [atom.lstrip("!") for atom in atoms \
4331                         if atom[:1] == "!"]
4332                 if blocker_atoms:
4333                         blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
4334                         for inst_pkg in installed_pkgs:
4335                                 try:
4336                                         blocker_atoms.iterAtomsForPackage(inst_pkg).next()
4337                                 except (portage.exception.InvalidDependString, StopIteration):
4338                                         continue
4339                                 blocking_pkgs.add(inst_pkg)
4340
4341                 return blocking_pkgs
4342
4343 def show_invalid_depstring_notice(parent_node, depstring, error_msg):
4344
4345         msg1 = "\n\n!!! Invalid or corrupt dependency specification: " + \
4346                 "\n\n%s\n\n%s\n\n%s\n\n" % (error_msg, parent_node, depstring)
4347         p_type, p_root, p_key, p_status = parent_node
4348         msg = []
4349         if p_status == "nomerge":
4350                 category, pf = portage.catsplit(p_key)
4351                 pkg_location = os.path.join(p_root, portage.VDB_PATH, category, pf)
4352                 msg.append("Portage is unable to process the dependencies of the ")
4353                 msg.append("'%s' package. " % p_key)
4354                 msg.append("In order to correct this problem, the package ")
4355                 msg.append("should be uninstalled, reinstalled, or upgraded. ")
4356                 msg.append("As a temporary workaround, the --nodeps option can ")
4357                 msg.append("be used to ignore all dependencies.  For reference, ")
4358                 msg.append("the problematic dependencies can be found in the ")
4359                 msg.append("*DEPEND files located in '%s/'." % pkg_location)
4360         else:
4361                 msg.append("This package can not be installed. ")
4362                 msg.append("Please notify the '%s' package maintainer " % p_key)
4363                 msg.append("about this problem.")
4364
4365         msg2 = "".join("%s\n" % line for line in textwrap.wrap("".join(msg), 72))
4366         writemsg_level(msg1 + msg2, level=logging.ERROR, noiselevel=-1)
4367
4368 class PackageVirtualDbapi(portage.dbapi):
4369         """
4370         A dbapi-like interface class that represents the state of the installed
4371         package database as new packages are installed, replacing any packages
4372         that previously existed in the same slot. The main difference between
4373         this class and fakedbapi is that this one uses Package instances
4374         internally (passed in via cpv_inject() and cpv_remove() calls).
4375         """
4376         def __init__(self, settings):
4377                 portage.dbapi.__init__(self)
4378                 self.settings = settings
4379                 self._match_cache = {}
4380                 self._cp_map = {}
4381                 self._cpv_map = {}
4382
4383         def clear(self):
4384                 """
4385                 Remove all packages.
4386                 """
4387                 if self._cpv_map:
4388                         self._clear_cache()
4389                         self._cp_map.clear()
4390                         self._cpv_map.clear()
4391
4392         def copy(self):
4393                 obj = PackageVirtualDbapi(self.settings)
4394                 obj._match_cache = self._match_cache.copy()
4395                 obj._cp_map = self._cp_map.copy()
4396                 for k, v in obj._cp_map.iteritems():
4397                         obj._cp_map[k] = v[:]
4398                 obj._cpv_map = self._cpv_map.copy()
4399                 return obj
4400
4401         def __iter__(self):
4402                 return self._cpv_map.itervalues()
4403
4404         def __contains__(self, item):
4405                 existing = self._cpv_map.get(item.cpv)
4406                 if existing is not None and \
4407                         existing == item:
4408                         return True
4409                 return False
4410
4411         def get(self, item, default=None):
4412                 cpv = getattr(item, "cpv", None)
4413                 if cpv is None:
4414                         if len(item) != 4:
4415                                 return default
4416                         type_name, root, cpv, operation = item
4417
4418                 existing = self._cpv_map.get(cpv)
4419                 if existing is not None and \
4420                         existing == item:
4421                         return existing
4422                 return default
4423
4424         def match_pkgs(self, atom):
4425                 return [self._cpv_map[cpv] for cpv in self.match(atom)]
4426
4427         def _clear_cache(self):
4428                 if self._categories is not None:
4429                         self._categories = None
4430                 if self._match_cache:
4431                         self._match_cache = {}
4432
4433         def match(self, origdep, use_cache=1):
4434                 result = self._match_cache.get(origdep)
4435                 if result is not None:
4436                         return result[:]
4437                 result = portage.dbapi.match(self, origdep, use_cache=use_cache)
4438                 self._match_cache[origdep] = result
4439                 return result[:]
4440
4441         def cpv_exists(self, cpv):
4442                 return cpv in self._cpv_map
4443
4444         def cp_list(self, mycp, use_cache=1):
4445                 cachelist = self._match_cache.get(mycp)
4446                 # cp_list() doesn't expand old-style virtuals
4447                 if cachelist and cachelist[0].startswith(mycp):
4448                         return cachelist[:]
4449                 cpv_list = self._cp_map.get(mycp)
4450                 if cpv_list is None:
4451                         cpv_list = []
4452                 else:
4453                         cpv_list = [pkg.cpv for pkg in cpv_list]
4454                 self._cpv_sort_ascending(cpv_list)
4455                 if not (not cpv_list and mycp.startswith("virtual/")):
4456                         self._match_cache[mycp] = cpv_list
4457                 return cpv_list[:]
4458
4459         def cp_all(self):
4460                 return list(self._cp_map)
4461
4462         def cpv_all(self):
4463                 return list(self._cpv_map)
4464
4465         def cpv_inject(self, pkg):
4466                 cp_list = self._cp_map.get(pkg.cp)
4467                 if cp_list is None:
4468                         cp_list = []
4469                         self._cp_map[pkg.cp] = cp_list
4470                 e_pkg = self._cpv_map.get(pkg.cpv)
4471                 if e_pkg is not None:
4472                         if e_pkg == pkg:
4473                                 return
4474                         self.cpv_remove(e_pkg)
4475                 for e_pkg in cp_list:
4476                         if e_pkg.slot_atom == pkg.slot_atom:
4477                                 if e_pkg == pkg:
4478                                         return
4479                                 self.cpv_remove(e_pkg)
4480                                 break
4481                 cp_list.append(pkg)
4482                 self._cpv_map[pkg.cpv] = pkg
4483                 self._clear_cache()
4484
4485         def cpv_remove(self, pkg):
4486                 old_pkg = self._cpv_map.get(pkg.cpv)
4487                 if old_pkg != pkg:
4488                         raise KeyError(pkg)
4489                 self._cp_map[pkg.cp].remove(pkg)
4490                 del self._cpv_map[pkg.cpv]
4491                 self._clear_cache()
4492
4493         def aux_get(self, cpv, wants):
4494                 metadata = self._cpv_map[cpv].metadata
4495                 return [metadata.get(x, "") for x in wants]
4496
4497         def aux_update(self, cpv, values):
4498                 self._cpv_map[cpv].metadata.update(values)
4499                 self._clear_cache()
4500
4501 class depgraph(object):
4502
4503         pkg_tree_map = RootConfig.pkg_tree_map
4504
4505         _dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
4506
4507         def __init__(self, settings, trees, myopts, myparams, spinner):
4508                 self.settings = settings
4509                 self.target_root = settings["ROOT"]
4510                 self.myopts = myopts
4511                 self.myparams = myparams
4512                 self.edebug = 0
4513                 if settings.get("PORTAGE_DEBUG", "") == "1":
4514                         self.edebug = 1
4515                 self.spinner = spinner
4516                 self._running_root = trees["/"]["root_config"]
4517                 self._opts_no_restart = Scheduler._opts_no_restart
4518                 self.pkgsettings = {}
4519                 # Maps slot atom to package for each Package added to the graph.
4520                 self._slot_pkg_map = {}
4521                 # Maps nodes to the reasons they were selected for reinstallation.
4522                 self._reinstall_nodes = {}
4523                 self.mydbapi = {}
4524                 self.trees = {}
4525                 self._trees_orig = trees
4526                 self.roots = {}
4527                 # Contains a filtered view of preferred packages that are selected
4528                 # from available repositories.
4529                 self._filtered_trees = {}
4530                 # Contains installed packages and new packages that have been added
4531                 # to the graph.
4532                 self._graph_trees = {}
4533                 # All Package instances
4534                 self._pkg_cache = {}
4535                 for myroot in trees:
4536                         self.trees[myroot] = {}
4537                         # Create a RootConfig instance that references
4538                         # the FakeVartree instead of the real one.
4539                         self.roots[myroot] = RootConfig(
4540                                 trees[myroot]["vartree"].settings,
4541                                 self.trees[myroot],
4542                                 trees[myroot]["root_config"].setconfig)
4543                         for tree in ("porttree", "bintree"):
4544                                 self.trees[myroot][tree] = trees[myroot][tree]
4545                         self.trees[myroot]["vartree"] = \
4546                                 FakeVartree(trees[myroot]["root_config"],
4547                                         pkg_cache=self._pkg_cache)
4548                         self.pkgsettings[myroot] = portage.config(
4549                                 clone=self.trees[myroot]["vartree"].settings)
4550                         self._slot_pkg_map[myroot] = {}
4551                         vardb = self.trees[myroot]["vartree"].dbapi
4552                         preload_installed_pkgs = "--nodeps" not in self.myopts and \
4553                                 "--buildpkgonly" not in self.myopts
4554                         # This fakedbapi instance will model the state that the vdb will
4555                         # have after new packages have been installed.
4556                         fakedb = PackageVirtualDbapi(vardb.settings)
4557                         if preload_installed_pkgs:
4558                                 for pkg in vardb:
4559                                         self.spinner.update()
4560                                         # This triggers metadata updates via FakeVartree.
4561                                         vardb.aux_get(pkg.cpv, [])
4562                                         fakedb.cpv_inject(pkg)
4563
4564                         # Now that the vardb state is cached in our FakeVartree,
4565                         # we won't be needing the real vartree cache for awhile.
4566                         # To make some room on the heap, clear the vardbapi
4567                         # caches.
4568                         trees[myroot]["vartree"].dbapi._clear_cache()
4569                         gc.collect()
4570
4571                         self.mydbapi[myroot] = fakedb
4572                         def graph_tree():
4573                                 pass
4574                         graph_tree.dbapi = fakedb
4575                         self._graph_trees[myroot] = {}
4576                         self._filtered_trees[myroot] = {}
4577                         # Substitute the graph tree for the vartree in dep_check() since we
4578                         # want atom selections to be consistent with package selections
4579                         # have already been made.
4580                         self._graph_trees[myroot]["porttree"]   = graph_tree
4581                         self._graph_trees[myroot]["vartree"]    = graph_tree
4582                         def filtered_tree():
4583                                 pass
4584                         filtered_tree.dbapi = self._dep_check_composite_db(self, myroot)
4585                         self._filtered_trees[myroot]["porttree"] = filtered_tree
4586
4587                         # Passing in graph_tree as the vartree here could lead to better
4588                         # atom selections in some cases by causing atoms for packages that
4589                         # have been added to the graph to be preferred over other choices.
4590                         # However, it can trigger atom selections that result in
4591                         # unresolvable direct circular dependencies. For example, this
4592                         # happens with gwydion-dylan which depends on either itself or
4593                         # gwydion-dylan-bin. In case gwydion-dylan is not yet installed,
4594                         # gwydion-dylan-bin needs to be selected in order to avoid a
4595                         # an unresolvable direct circular dependency.
4596                         #
4597                         # To solve the problem described above, pass in "graph_db" so that
4598                         # packages that have been added to the graph are distinguishable
4599                         # from other available packages and installed packages. Also, pass
4600                         # the parent package into self._select_atoms() calls so that
4601                         # unresolvable direct circular dependencies can be detected and
4602                         # avoided when possible.
4603                         self._filtered_trees[myroot]["graph_db"] = graph_tree.dbapi
4604                         self._filtered_trees[myroot]["vartree"] = self.trees[myroot]["vartree"]
4605
4606                         dbs = []
4607                         portdb = self.trees[myroot]["porttree"].dbapi
4608                         bindb  = self.trees[myroot]["bintree"].dbapi
4609                         vardb  = self.trees[myroot]["vartree"].dbapi
4610                         #               (db, pkg_type, built, installed, db_keys)
4611                         if "--usepkgonly" not in self.myopts:
4612                                 db_keys = list(portdb._aux_cache_keys)
4613                                 dbs.append((portdb, "ebuild", False, False, db_keys))
4614                         if "--usepkg" in self.myopts:
4615                                 db_keys = list(bindb._aux_cache_keys)
4616                                 dbs.append((bindb,  "binary", True, False, db_keys))
4617                         db_keys = list(trees[myroot]["vartree"].dbapi._aux_cache_keys)
4618                         dbs.append((vardb, "installed", True, True, db_keys))
4619                         self._filtered_trees[myroot]["dbs"] = dbs
4620                         if "--usepkg" in self.myopts:
4621                                 self.trees[myroot]["bintree"].populate(
4622                                         "--getbinpkg" in self.myopts,
4623                                         "--getbinpkgonly" in self.myopts)
4624                 del trees
4625
4626                 self.digraph=portage.digraph()
4627                 # contains all sets added to the graph
4628                 self._sets = {}
4629                 # contains atoms given as arguments
4630                 self._sets["args"] = InternalPackageSet()
4631                 # contains all atoms from all sets added to the graph, including
4632                 # atoms given as arguments
4633                 self._set_atoms = InternalPackageSet()
4634                 self._atom_arg_map = {}
4635                 # contains all nodes pulled in by self._set_atoms
4636                 self._set_nodes = set()
4637                 # Contains only Blocker -> Uninstall edges
4638                 self._blocker_uninstalls = digraph()
4639                 # Contains only Package -> Blocker edges
4640                 self._blocker_parents = digraph()
4641                 # Contains only irrelevant Package -> Blocker edges
4642                 self._irrelevant_blockers = digraph()
4643                 # Contains only unsolvable Package -> Blocker edges
4644                 self._unsolvable_blockers = digraph()
4645                 # Contains all Blocker -> Blocked Package edges
4646                 self._blocked_pkgs = digraph()
4647                 # Contains world packages that have been protected from
4648                 # uninstallation but may not have been added to the graph
4649                 # if the graph is not complete yet.
4650                 self._blocked_world_pkgs = {}
4651                 self._slot_collision_info = {}
4652                 # Slot collision nodes are not allowed to block other packages since
4653                 # blocker validation is only able to account for one package per slot.
4654                 self._slot_collision_nodes = set()
4655                 self._parent_atoms = {}
4656                 self._slot_conflict_parent_atoms = set()
4657                 self._serialized_tasks_cache = None
4658                 self._scheduler_graph = None
4659                 self._displayed_list = None
4660                 self._pprovided_args = []
4661                 self._missing_args = []
4662                 self._masked_installed = set()
4663                 self._unsatisfied_deps_for_display = []
4664                 self._unsatisfied_blockers_for_display = None
4665                 self._circular_deps_for_display = None
4666                 self._dep_stack = []
4667                 self._unsatisfied_deps = []
4668                 self._initially_unsatisfied_deps = []
4669                 self._ignored_deps = []
4670                 self._required_set_names = set(["system", "world"])
4671                 self._select_atoms = self._select_atoms_highest_available
4672                 self._select_package = self._select_pkg_highest_available
4673                 self._highest_pkg_cache = {}
4674
4675         def _show_slot_collision_notice(self):
4676                 """Show an informational message advising the user to mask one of the
4677                 the packages. In some cases it may be possible to resolve this
4678                 automatically, but support for backtracking (removal nodes that have
4679                 already been selected) will be required in order to handle all possible
4680                 cases.
4681                 """
4682
4683                 if not self._slot_collision_info:
4684                         return
4685
4686                 self._show_merge_list()
4687
4688                 msg = []
4689                 msg.append("\n!!! Multiple package instances within a single " + \
4690                         "package slot have been pulled\n")
4691                 msg.append("!!! into the dependency graph, resulting" + \
4692                         " in a slot conflict:\n\n")
4693                 indent = "  "
4694                 # Max number of parents shown, to avoid flooding the display.
4695                 max_parents = 3
4696                 explanation_columns = 70
4697                 explanations = 0
4698                 for (slot_atom, root), slot_nodes \
4699                         in self._slot_collision_info.iteritems():
4700                         msg.append(str(slot_atom))
4701                         msg.append("\n\n")
4702
4703                         for node in slot_nodes:
4704                                 msg.append(indent)
4705                                 msg.append(str(node))
4706                                 parent_atoms = self._parent_atoms.get(node)
4707                                 if parent_atoms:
4708                                         pruned_list = set()
4709                                         # Prefer conflict atoms over others.
4710                                         for parent_atom in parent_atoms:
4711                                                 if len(pruned_list) >= max_parents:
4712                                                         break
4713                                                 if parent_atom in self._slot_conflict_parent_atoms:
4714                                                         pruned_list.add(parent_atom)
4715
4716                                         # If this package was pulled in by conflict atoms then
4717                                         # show those alone since those are the most interesting.
4718                                         if not pruned_list:
4719                                                 # When generating the pruned list, prefer instances
4720                                                 # of DependencyArg over instances of Package.
4721                                                 for parent_atom in parent_atoms:
4722                                                         if len(pruned_list) >= max_parents:
4723                                                                 break
4724                                                         parent, atom = parent_atom
4725                                                         if isinstance(parent, DependencyArg):
4726                                                                 pruned_list.add(parent_atom)
4727                                                 # Prefer Packages instances that themselves have been
4728                                                 # pulled into collision slots.
4729                                                 for parent_atom in parent_atoms:
4730                                                         if len(pruned_list) >= max_parents:
4731                                                                 break
4732                                                         parent, atom = parent_atom
4733                                                         if isinstance(parent, Package) and \
4734                                                                 (parent.slot_atom, parent.root) \
4735                                                                 in self._slot_collision_info:
4736                                                                 pruned_list.add(parent_atom)
4737                                                 for parent_atom in parent_atoms:
4738                                                         if len(pruned_list) >= max_parents:
4739                                                                 break
4740                                                         pruned_list.add(parent_atom)
4741                                         omitted_parents = len(parent_atoms) - len(pruned_list)
4742                                         parent_atoms = pruned_list
4743                                         msg.append(" pulled in by\n")
4744                                         for parent_atom in parent_atoms:
4745                                                 parent, atom = parent_atom
4746                                                 msg.append(2*indent)
4747                                                 if isinstance(parent,
4748                                                         (PackageArg, AtomArg)):
4749                                                         # For PackageArg and AtomArg types, it's
4750                                                         # redundant to display the atom attribute.
4751                                                         msg.append(str(parent))
4752                                                 else:
4753                                                         # Display the specific atom from SetArg or
4754                                                         # Package types.
4755                                                         msg.append("%s required by %s" % (atom, parent))
4756                                                 msg.append("\n")
4757                                         if omitted_parents:
4758                                                 msg.append(2*indent)
4759                                                 msg.append("(and %d more)\n" % omitted_parents)
4760                                 else:
4761                                         msg.append(" (no parents)\n")
4762                                 msg.append("\n")
4763                         explanation = self._slot_conflict_explanation(slot_nodes)
4764                         if explanation:
4765                                 explanations += 1
4766                                 msg.append(indent + "Explanation:\n\n")
4767                                 for line in textwrap.wrap(explanation, explanation_columns):
4768                                         msg.append(2*indent + line + "\n")
4769                                 msg.append("\n")
4770                 msg.append("\n")
4771                 sys.stderr.write("".join(msg))
4772                 sys.stderr.flush()
4773
4774                 explanations_for_all = explanations == len(self._slot_collision_info)
4775
4776                 if explanations_for_all or "--quiet" in self.myopts:
4777                         return
4778
4779                 msg = []
4780                 msg.append("It may be possible to solve this problem ")
4781                 msg.append("by using package.mask to prevent one of ")
4782                 msg.append("those packages from being selected. ")
4783                 msg.append("However, it is also possible that conflicting ")
4784                 msg.append("dependencies exist such that they are impossible to ")
4785                 msg.append("satisfy simultaneously.  If such a conflict exists in ")
4786                 msg.append("the dependencies of two different packages, then those ")
4787                 msg.append("packages can not be installed simultaneously.")
4788
4789                 from formatter import AbstractFormatter, DumbWriter
4790                 f = AbstractFormatter(DumbWriter(sys.stderr, maxcol=72))
4791                 for x in msg:
4792                         f.add_flowing_data(x)
4793                 f.end_paragraph(1)
4794
4795                 msg = []
4796                 msg.append("For more information, see MASKED PACKAGES ")
4797                 msg.append("section in the emerge man page or refer ")
4798                 msg.append("to the Gentoo Handbook.")
4799                 for x in msg:
4800                         f.add_flowing_data(x)
4801                 f.end_paragraph(1)
4802                 f.writer.flush()
4803
4804         def _slot_conflict_explanation(self, slot_nodes):
4805                 """
4806                 When a slot conflict occurs due to USE deps, there are a few
4807                 different cases to consider:
4808
4809                 1) New USE are correctly set but --newuse wasn't requested so an
4810                    installed package with incorrect USE happened to get pulled
4811                    into graph before the new one.
4812
4813                 2) New USE are incorrectly set but an installed package has correct
4814                    USE so it got pulled into the graph, and a new instance also got
4815                    pulled in due to --newuse or an upgrade.
4816
4817                 3) Multiple USE deps exist that can't be satisfied simultaneously,
4818                    and multiple package instances got pulled into the same slot to
4819                    satisfy the conflicting deps.
4820
4821                 Currently, explanations and suggested courses of action are generated
4822                 for cases 1 and 2. Case 3 is too complex to give a useful suggestion.
4823                 """
4824
4825                 if len(slot_nodes) != 2:
4826                         # Suggestions are only implemented for
4827                         # conflicts between two packages.
4828                         return None
4829
4830                 all_conflict_atoms = self._slot_conflict_parent_atoms
4831                 matched_node = None
4832                 matched_atoms = None
4833                 unmatched_node = None
4834                 for node in slot_nodes:
4835                         parent_atoms = self._parent_atoms.get(node)
4836                         if not parent_atoms:
4837                                 # Normally, there are always parent atoms. If there are
4838                                 # none then something unexpected is happening and there's
4839                                 # currently no suggestion for this case.
4840                                 return None
4841                         conflict_atoms = all_conflict_atoms.intersection(parent_atoms)
4842                         for parent_atom in conflict_atoms:
4843                                 parent, atom = parent_atom
4844                                 if not atom.use:
4845                                         # Suggestions are currently only implemented for cases
4846                                         # in which all conflict atoms have USE deps.
4847                                         return None
4848                         if conflict_atoms:
4849                                 if matched_node is not None:
4850                                         # If conflict atoms match multiple nodes
4851                                         # then there's no suggestion.
4852                                         return None
4853                                 matched_node = node
4854                                 matched_atoms = conflict_atoms
4855                         else:
4856                                 if unmatched_node is not None:
4857                                         # Neither node is matched by conflict atoms, and
4858                                         # there is no suggestion for this case.
4859                                         return None
4860                                 unmatched_node = node
4861
4862                 if matched_node is None or unmatched_node is None:
4863                         # This shouldn't happen.
4864                         return None
4865
4866                 if unmatched_node.installed and not matched_node.installed and \
4867                         unmatched_node.cpv == matched_node.cpv:
4868                         # If the conflicting packages are the same version then
4869                         # --newuse should be all that's needed. If they are different
4870                         # versions then there's some other problem.
4871                         return "New USE are correctly set, but --newuse wasn't" + \
4872                                 " requested, so an installed package with incorrect USE " + \
4873                                 "happened to get pulled into the dependency graph. " + \
4874                                 "In order to solve " + \
4875                                 "this, either specify the --newuse option or explicitly " + \
4876                                 " reinstall '%s'." % matched_node.slot_atom
4877
4878                 if matched_node.installed and not unmatched_node.installed:
4879                         atoms = sorted(set(atom for parent, atom in matched_atoms))
4880                         explanation = ("New USE for '%s' are incorrectly set. " + \
4881                                 "In order to solve this, adjust USE to satisfy '%s'") % \
4882                                 (matched_node.slot_atom, atoms[0])
4883                         if len(atoms) > 1:
4884                                 for atom in atoms[1:-1]:
4885                                         explanation += ", '%s'" % (atom,)
4886                                 if len(atoms) > 2:
4887                                         explanation += ","
4888                                 explanation += " and '%s'" % (atoms[-1],)
4889                         explanation += "."
4890                         return explanation
4891
4892                 return None
4893
4894         def _process_slot_conflicts(self):
4895                 """
4896                 Process slot conflict data to identify specific atoms which
4897                 lead to conflict. These atoms only match a subset of the
4898                 packages that have been pulled into a given slot.
4899                 """
4900                 for (slot_atom, root), slot_nodes \
4901                         in self._slot_collision_info.iteritems():
4902
4903                         all_parent_atoms = set()
4904                         for pkg in slot_nodes:
4905                                 parent_atoms = self._parent_atoms.get(pkg)
4906                                 if not parent_atoms:
4907                                         continue
4908                                 all_parent_atoms.update(parent_atoms)
4909
4910                         for pkg in slot_nodes:
4911                                 parent_atoms = self._parent_atoms.get(pkg)
4912                                 if parent_atoms is None:
4913                                         parent_atoms = set()
4914                                         self._parent_atoms[pkg] = parent_atoms
4915                                 for parent_atom in all_parent_atoms:
4916                                         if parent_atom in parent_atoms:
4917                                                 continue
4918                                         # Use package set for matching since it will match via
4919                                         # PROVIDE when necessary, while match_from_list does not.
4920                                         parent, atom = parent_atom
4921                                         atom_set = InternalPackageSet(
4922                                                 initial_atoms=(atom,))
4923                                         if atom_set.findAtomForPackage(pkg):
4924                                                 parent_atoms.add(parent_atom)
4925                                         else:
4926                                                 self._slot_conflict_parent_atoms.add(parent_atom)
4927
4928         def _reinstall_for_flags(self, forced_flags,
4929                 orig_use, orig_iuse, cur_use, cur_iuse):
4930                 """Return a set of flags that trigger reinstallation, or None if there
4931                 are no such flags."""
4932                 if "--newuse" in self.myopts:
4933                         flags = set(orig_iuse.symmetric_difference(
4934                                 cur_iuse).difference(forced_flags))
4935                         flags.update(orig_iuse.intersection(orig_use).symmetric_difference(
4936                                 cur_iuse.intersection(cur_use)))
4937                         if flags:
4938                                 return flags
4939                 elif "changed-use" == self.myopts.get("--reinstall"):
4940                         flags = orig_iuse.intersection(orig_use).symmetric_difference(
4941                                 cur_iuse.intersection(cur_use))
4942                         if flags:
4943                                 return flags
4944                 return None
4945
4946         def _create_graph(self, allow_unsatisfied=False):
4947                 dep_stack = self._dep_stack
4948                 while dep_stack:
4949                         self.spinner.update()
4950                         dep = dep_stack.pop()
4951                         if isinstance(dep, Package):
4952                                 if not self._add_pkg_deps(dep,
4953                                         allow_unsatisfied=allow_unsatisfied):
4954                                         return 0
4955                                 continue
4956                         if not self._add_dep(dep, allow_unsatisfied=allow_unsatisfied):
4957                                 return 0
4958                 return 1
4959
4960         def _add_dep(self, dep, allow_unsatisfied=False):
4961                 debug = "--debug" in self.myopts
4962                 buildpkgonly = "--buildpkgonly" in self.myopts
4963                 nodeps = "--nodeps" in self.myopts
4964                 empty = "empty" in self.myparams
4965                 deep = "deep" in self.myparams
4966                 update = "--update" in self.myopts and dep.depth <= 1
4967                 if dep.blocker:
4968                         if not buildpkgonly and \
4969                                 not nodeps and \
4970                                 dep.parent not in self._slot_collision_nodes:
4971                                 if dep.parent.onlydeps:
4972                                         # It's safe to ignore blockers if the
4973                                         # parent is an --onlydeps node.
4974                                         return 1
4975                                 # The blocker applies to the root where
4976                                 # the parent is or will be installed.
4977                                 blocker = Blocker(atom=dep.atom,
4978                                         eapi=dep.parent.metadata["EAPI"],
4979                                         root=dep.parent.root)
4980                                 self._blocker_parents.add(blocker, dep.parent)
4981                         return 1
4982                 dep_pkg, existing_node = self._select_package(dep.root, dep.atom,
4983                         onlydeps=dep.onlydeps)
4984                 if not dep_pkg:
4985                         if dep.priority.optional:
4986                                 # This could be an unecessary build-time dep
4987                                 # pulled in by --with-bdeps=y.
4988                                 return 1
4989                         if allow_unsatisfied:
4990                                 self._unsatisfied_deps.append(dep)
4991                                 return 1
4992                         self._unsatisfied_deps_for_display.append(
4993                                 ((dep.root, dep.atom), {"myparent":dep.parent}))
4994                         return 0
4995                 # In some cases, dep_check will return deps that shouldn't
4996                 # be proccessed any further, so they are identified and
4997                 # discarded here. Try to discard as few as possible since
4998                 # discarded dependencies reduce the amount of information
4999                 # available for optimization of merge order.
5000                 if dep.priority.satisfied and \
5001                         not dep_pkg.installed and \
5002                         not (existing_node or empty or deep or update):
5003                         myarg = None
5004                         if dep.root == self.target_root:
5005                                 try:
5006                                         myarg = self._iter_atoms_for_pkg(dep_pkg).next()
5007                                 except StopIteration:
5008                                         pass
5009                                 except portage.exception.InvalidDependString:
5010                                         if not dep_pkg.installed:
5011                                                 # This shouldn't happen since the package
5012                                                 # should have been masked.
5013                                                 raise
5014                         if not myarg:
5015                                 self._ignored_deps.append(dep)
5016                                 return 1
5017
5018                 if not self._add_pkg(dep_pkg, dep):
5019                         return 0
5020                 return 1
5021
5022         def _add_pkg(self, pkg, dep):
5023                 myparent = None
5024                 priority = None
5025                 depth = 0
5026                 if dep is None:
5027                         dep = Dependency()
5028                 else:
5029                         myparent = dep.parent
5030                         priority = dep.priority
5031                         depth = dep.depth
5032                 if priority is None:
5033                         priority = DepPriority()
5034                 """
5035                 Fills the digraph with nodes comprised of packages to merge.
5036                 mybigkey is the package spec of the package to merge.
5037                 myparent is the package depending on mybigkey ( or None )
5038                 addme = Should we add this package to the digraph or are we just looking at it's deps?
5039                         Think --onlydeps, we need to ignore packages in that case.
5040                 #stuff to add:
5041                 #SLOT-aware emerge
5042                 #IUSE-aware emerge -> USE DEP aware depgraph
5043                 #"no downgrade" emerge
5044                 """
5045                 # Ensure that the dependencies of the same package
5046                 # are never processed more than once.
5047                 previously_added = pkg in self.digraph
5048
5049                 # select the correct /var database that we'll be checking against
5050                 vardbapi = self.trees[pkg.root]["vartree"].dbapi
5051                 pkgsettings = self.pkgsettings[pkg.root]
5052
5053                 arg_atoms = None
5054                 if True:
5055                         try:
5056                                 arg_atoms = list(self._iter_atoms_for_pkg(pkg))
5057                         except portage.exception.InvalidDependString, e:
5058                                 if not pkg.installed:
5059                                         show_invalid_depstring_notice(
5060                                                 pkg, pkg.metadata["PROVIDE"], str(e))
5061                                         return 0
5062                                 del e
5063
5064                 if not pkg.onlydeps:
5065                         if not pkg.installed and \
5066                                 "empty" not in self.myparams and \
5067                                 vardbapi.match(pkg.slot_atom):
5068                                 # Increase the priority of dependencies on packages that
5069                                 # are being rebuilt. This optimizes merge order so that
5070                                 # dependencies are rebuilt/updated as soon as possible,
5071                                 # which is needed especially when emerge is called by
5072                                 # revdep-rebuild since dependencies may be affected by ABI
5073                                 # breakage that has rendered them useless. Don't adjust
5074                                 # priority here when in "empty" mode since all packages
5075                                 # are being merged in that case.
5076                                 priority.rebuild = True
5077
5078                         existing_node = self._slot_pkg_map[pkg.root].get(pkg.slot_atom)
5079                         slot_collision = False
5080                         if existing_node:
5081                                 existing_node_matches = pkg.cpv == existing_node.cpv
5082                                 if existing_node_matches and \
5083                                         pkg != existing_node and \
5084                                         dep.atom is not None:
5085                                         # Use package set for matching since it will match via
5086                                         # PROVIDE when necessary, while match_from_list does not.
5087                                         atom_set = InternalPackageSet(initial_atoms=[dep.atom])
5088                                         if not atom_set.findAtomForPackage(existing_node):
5089                                                 existing_node_matches = False
5090                                 if existing_node_matches:
5091                                         # The existing node can be reused.
5092                                         if arg_atoms:
5093                                                 for parent_atom in arg_atoms:
5094                                                         parent, atom = parent_atom
5095                                                         self.digraph.add(existing_node, parent,
5096                                                                 priority=priority)
5097                                                         self._add_parent_atom(existing_node, parent_atom)
5098                                         # If a direct circular dependency is not an unsatisfied
5099                                         # buildtime dependency then drop it here since otherwise
5100                                         # it can skew the merge order calculation in an unwanted
5101                                         # way.
5102                                         if existing_node != myparent or \
5103                                                 (priority.buildtime and not priority.satisfied):
5104                                                 self.digraph.addnode(existing_node, myparent,
5105                                                         priority=priority)
5106                                                 if dep.atom is not None and dep.parent is not None:
5107                                                         self._add_parent_atom(existing_node,
5108                                                                 (dep.parent, dep.atom))
5109                                         return 1
5110                                 else:
5111
5112                                         # A slot collision has occurred.  Sometimes this coincides
5113                                         # with unresolvable blockers, so the slot collision will be
5114                                         # shown later if there are no unresolvable blockers.
5115                                         self._add_slot_conflict(pkg)
5116                                         slot_collision = True
5117
5118                         if slot_collision:
5119                                 # Now add this node to the graph so that self.display()
5120                                 # can show use flags and --tree portage.output.  This node is
5121                                 # only being partially added to the graph.  It must not be
5122                                 # allowed to interfere with the other nodes that have been
5123                                 # added.  Do not overwrite data for existing nodes in
5124                                 # self.mydbapi since that data will be used for blocker
5125                                 # validation.
5126                                 # Even though the graph is now invalid, continue to process
5127                                 # dependencies so that things like --fetchonly can still
5128                                 # function despite collisions.
5129                                 pass
5130                         elif not previously_added:
5131                                 self._slot_pkg_map[pkg.root][pkg.slot_atom] = pkg
5132                                 self.mydbapi[pkg.root].cpv_inject(pkg)
5133                                 self._filtered_trees[pkg.root]["porttree"].dbapi._clear_cache()
5134
5135                         if not pkg.installed:
5136                                 # Allow this package to satisfy old-style virtuals in case it
5137                                 # doesn't already. Any pre-existing providers will be preferred
5138                                 # over this one.
5139                                 try:
5140                                         pkgsettings.setinst(pkg.cpv, pkg.metadata)
5141                                         # For consistency, also update the global virtuals.
5142                                         settings = self.roots[pkg.root].settings
5143                                         settings.unlock()
5144                                         settings.setinst(pkg.cpv, pkg.metadata)
5145                                         settings.lock()
5146                                 except portage.exception.InvalidDependString, e:
5147                                         show_invalid_depstring_notice(
5148                                                 pkg, pkg.metadata["PROVIDE"], str(e))
5149                                         del e
5150                                         return 0
5151
5152                 if arg_atoms:
5153                         self._set_nodes.add(pkg)
5154
5155                 # Do this even when addme is False (--onlydeps) so that the
5156                 # parent/child relationship is always known in case
5157                 # self._show_slot_collision_notice() needs to be called later.
5158                 self.digraph.add(pkg, myparent, priority=priority)
5159                 if dep.atom is not None and dep.parent is not None:
5160                         self._add_parent_atom(pkg, (dep.parent, dep.atom))
5161
5162                 if arg_atoms:
5163                         for parent_atom in arg_atoms:
5164                                 parent, atom = parent_atom
5165                                 self.digraph.add(pkg, parent, priority=priority)
5166                                 self._add_parent_atom(pkg, parent_atom)
5167
5168                 """ This section determines whether we go deeper into dependencies or not.
5169                     We want to go deeper on a few occasions:
5170                     Installing package A, we need to make sure package A's deps are met.
5171                     emerge --deep <pkgspec>; we need to recursively check dependencies of pkgspec
5172                     If we are in --nodeps (no recursion) mode, we obviously only check 1 level of dependencies.
5173                 """
5174                 dep_stack = self._dep_stack
5175                 if "recurse" not in self.myparams:
5176                         return 1
5177                 elif pkg.installed and \
5178                         "deep" not in self.myparams:
5179                         dep_stack = self._ignored_deps
5180
5181                 self.spinner.update()
5182
5183                 if arg_atoms:
5184                         depth = 0
5185                 pkg.depth = depth
5186                 if not previously_added:
5187                         dep_stack.append(pkg)
5188                 return 1
5189
5190         def _add_parent_atom(self, pkg, parent_atom):
5191                 parent_atoms = self._parent_atoms.get(pkg)
5192                 if parent_atoms is None:
5193                         parent_atoms = set()
5194                         self._parent_atoms[pkg] = parent_atoms
5195                 parent_atoms.add(parent_atom)
5196
5197         def _add_slot_conflict(self, pkg):
5198                 self._slot_collision_nodes.add(pkg)
5199                 slot_key = (pkg.slot_atom, pkg.root)
5200                 slot_nodes = self._slot_collision_info.get(slot_key)
5201                 if slot_nodes is None:
5202                         slot_nodes = set()
5203                         slot_nodes.add(self._slot_pkg_map[pkg.root][pkg.slot_atom])
5204                         self._slot_collision_info[slot_key] = slot_nodes
5205                 slot_nodes.add(pkg)
5206
5207         def _add_pkg_deps(self, pkg, allow_unsatisfied=False):
5208
5209                 mytype = pkg.type_name
5210                 myroot = pkg.root
5211                 mykey = pkg.cpv
5212                 metadata = pkg.metadata
5213                 myuse = pkg.use.enabled
5214                 jbigkey = pkg
5215                 depth = pkg.depth + 1
5216                 removal_action = "remove" in self.myparams
5217
5218                 edepend={}
5219                 depkeys = ["DEPEND","RDEPEND","PDEPEND"]
5220                 for k in depkeys:
5221                         edepend[k] = metadata[k]
5222
5223                 if not pkg.built and \
5224                         "--buildpkgonly" in self.myopts and \
5225                         "deep" not in self.myparams and \
5226                         "empty" not in self.myparams:
5227                         edepend["RDEPEND"] = ""
5228                         edepend["PDEPEND"] = ""
5229                 bdeps_optional = False
5230
5231                 if pkg.built and not removal_action:
5232                         if self.myopts.get("--with-bdeps", "n") == "y":
5233                                 # Pull in build time deps as requested, but marked them as
5234                                 # "optional" since they are not strictly required. This allows
5235                                 # more freedom in the merge order calculation for solving
5236                                 # circular dependencies. Don't convert to PDEPEND since that
5237                                 # could make --with-bdeps=y less effective if it is used to
5238                                 # adjust merge order to prevent built_with_use() calls from
5239                                 # failing.
5240                                 bdeps_optional = True
5241                         else:
5242                                 # built packages do not have build time dependencies.
5243                                 edepend["DEPEND"] = ""
5244
5245                 if removal_action and self.myopts.get("--with-bdeps", "y") == "n":
5246                         edepend["DEPEND"] = ""
5247
5248                 deps = (
5249                         ("/", edepend["DEPEND"],
5250                                 self._priority(buildtime=(not bdeps_optional),
5251                                 optional=bdeps_optional)),
5252                         (myroot, edepend["RDEPEND"], self._priority(runtime=True)),
5253                         (myroot, edepend["PDEPEND"], self._priority(runtime_post=True))
5254                 )
5255
5256                 debug = "--debug" in self.myopts
5257                 strict = mytype != "installed"
5258                 try:
5259                         for dep_root, dep_string, dep_priority in deps:
5260                                 if not dep_string:
5261                                         continue
5262                                 if debug:
5263                                         print
5264                                         print "Parent:   ", jbigkey
5265                                         print "Depstring:", dep_string
5266                                         print "Priority:", dep_priority
5267                                 vardb = self.roots[dep_root].trees["vartree"].dbapi
5268                                 try:
5269                                         selected_atoms = self._select_atoms(dep_root,
5270                                                 dep_string, myuse=myuse, parent=pkg, strict=strict,
5271                                                 priority=dep_priority)
5272                                 except portage.exception.InvalidDependString, e:
5273                                         show_invalid_depstring_notice(jbigkey, dep_string, str(e))
5274                                         return 0
5275                                 if debug:
5276                                         print "Candidates:", selected_atoms
5277
5278                                 for atom in selected_atoms:
5279                                         try:
5280
5281                                                 atom = portage.dep.Atom(atom)
5282
5283                                                 mypriority = dep_priority.copy()
5284                                                 if not atom.blocker and vardb.match(atom):
5285                                                         mypriority.satisfied = True
5286
5287                                                 if not self._add_dep(Dependency(atom=atom,
5288                                                         blocker=atom.blocker, depth=depth, parent=pkg,
5289                                                         priority=mypriority, root=dep_root),
5290                                                         allow_unsatisfied=allow_unsatisfied):
5291                                                         return 0
5292
5293                                         except portage.exception.InvalidAtom, e:
5294                                                 show_invalid_depstring_notice(
5295                                                         pkg, dep_string, str(e))
5296                                                 del e
5297                                                 if not pkg.installed:
5298                                                         return 0
5299
5300                                 if debug:
5301                                         print "Exiting...", jbigkey
5302                 except portage.exception.AmbiguousPackageName, e:
5303                         pkgs = e.args[0]
5304                         portage.writemsg("\n\n!!! An atom in the dependencies " + \
5305                                 "is not fully-qualified. Multiple matches:\n\n", noiselevel=-1)
5306                         for cpv in pkgs:
5307                                 portage.writemsg("    %s\n" % cpv, noiselevel=-1)
5308                         portage.writemsg("\n", noiselevel=-1)
5309                         if mytype == "binary":
5310                                 portage.writemsg(
5311                                         "!!! This binary package cannot be installed: '%s'\n" % \
5312                                         mykey, noiselevel=-1)
5313                         elif mytype == "ebuild":
5314                                 portdb = self.roots[myroot].trees["porttree"].dbapi
5315                                 myebuild, mylocation = portdb.findname2(mykey)
5316                                 portage.writemsg("!!! This ebuild cannot be installed: " + \
5317                                         "'%s'\n" % myebuild, noiselevel=-1)
5318                         portage.writemsg("!!! Please notify the package maintainer " + \
5319                                 "that atoms must be fully-qualified.\n", noiselevel=-1)
5320                         return 0
5321                 return 1
5322
5323         def _priority(self, **kwargs):
5324                 if "remove" in self.myparams:
5325                         priority_constructor = UnmergeDepPriority
5326                 else:
5327                         priority_constructor = DepPriority
5328                 return priority_constructor(**kwargs)
5329
5330         def _dep_expand(self, root_config, atom_without_category):
5331                 """
5332                 @param root_config: a root config instance
5333                 @type root_config: RootConfig
5334                 @param atom_without_category: an atom without a category component
5335                 @type atom_without_category: String
5336                 @rtype: list
5337                 @returns: a list of atoms containing categories (possibly empty)
5338                 """
5339                 null_cp = portage.dep_getkey(insert_category_into_atom(
5340                         atom_without_category, "null"))
5341                 cat, atom_pn = portage.catsplit(null_cp)
5342
5343                 dbs = self._filtered_trees[root_config.root]["dbs"]
5344                 categories = set()
5345                 for db, pkg_type, built, installed, db_keys in dbs:
5346                         for cat in db.categories:
5347                                 if db.cp_list("%s/%s" % (cat, atom_pn)):
5348                                         categories.add(cat)
5349
5350                 deps = []
5351                 for cat in categories:
5352                         deps.append(insert_category_into_atom(
5353                                 atom_without_category, cat))
5354                 return deps
5355
5356         def _have_new_virt(self, root, atom_cp):
5357                 ret = False
5358                 for db, pkg_type, built, installed, db_keys in \
5359                         self._filtered_trees[root]["dbs"]:
5360                         if db.cp_list(atom_cp):
5361                                 ret = True
5362                                 break
5363                 return ret
5364
5365         def _iter_atoms_for_pkg(self, pkg):
5366                 # TODO: add multiple $ROOT support
5367                 if pkg.root != self.target_root:
5368                         return
5369                 atom_arg_map = self._atom_arg_map
5370                 root_config = self.roots[pkg.root]
5371                 for atom in self._set_atoms.iterAtomsForPackage(pkg):
5372                         atom_cp = portage.dep_getkey(atom)
5373                         if atom_cp != pkg.cp and \
5374                                 self._have_new_virt(pkg.root, atom_cp):
5375                                 continue
5376                         visible_pkgs = root_config.visible_pkgs.match_pkgs(atom)
5377                         visible_pkgs.reverse() # descending order
5378                         higher_slot = None
5379                         for visible_pkg in visible_pkgs:
5380                                 if visible_pkg.cp != atom_cp:
5381                                         continue
5382                                 if pkg >= visible_pkg:
5383                                         # This is descending order, and we're not
5384                                         # interested in any versions <= pkg given.
5385                                         break
5386                                 if pkg.slot_atom != visible_pkg.slot_atom:
5387                                         higher_slot = visible_pkg
5388                                         break
5389                         if higher_slot is not None:
5390                                 continue
5391                         for arg in atom_arg_map[(atom, pkg.root)]:
5392                                 if isinstance(arg, PackageArg) and \
5393                                         arg.package != pkg:
5394                                         continue
5395                                 yield arg, atom
5396
5397         def select_files(self, myfiles):
5398                 """Given a list of .tbz2s, .ebuilds sets, and deps, create the
5399                 appropriate depgraph and return a favorite list."""
5400                 debug = "--debug" in self.myopts
5401                 root_config = self.roots[self.target_root]
5402                 sets = root_config.sets
5403                 getSetAtoms = root_config.setconfig.getSetAtoms
5404                 myfavorites=[]
5405                 myroot = self.target_root
5406                 dbs = self._filtered_trees[myroot]["dbs"]
5407                 vardb = self.trees[myroot]["vartree"].dbapi
5408                 real_vardb = self._trees_orig[myroot]["vartree"].dbapi
5409                 portdb = self.trees[myroot]["porttree"].dbapi
5410                 bindb = self.trees[myroot]["bintree"].dbapi
5411                 pkgsettings = self.pkgsettings[myroot]
5412                 args = []
5413                 onlydeps = "--onlydeps" in self.myopts
5414                 lookup_owners = []
5415                 for x in myfiles:
5416                         ext = os.path.splitext(x)[1]
5417                         if ext==".tbz2":
5418                                 if not os.path.exists(x):
5419                                         if os.path.exists(
5420                                                 os.path.join(pkgsettings["PKGDIR"], "All", x)):
5421                                                 x = os.path.join(pkgsettings["PKGDIR"], "All", x)
5422                                         elif os.path.exists(
5423                                                 os.path.join(pkgsettings["PKGDIR"], x)):
5424                                                 x = os.path.join(pkgsettings["PKGDIR"], x)
5425                                         else:
5426                                                 print "\n\n!!! Binary package '"+str(x)+"' does not exist."
5427                                                 print "!!! Please ensure the tbz2 exists as specified.\n"
5428                                                 return 0, myfavorites
5429                                 mytbz2=portage.xpak.tbz2(x)
5430                                 mykey=mytbz2.getelements("CATEGORY")[0]+"/"+os.path.splitext(os.path.basename(x))[0]
5431                                 if os.path.realpath(x) != \
5432                                         os.path.realpath(self.trees[myroot]["bintree"].getname(mykey)):
5433                                         print colorize("BAD", "\n*** You need to adjust PKGDIR to emerge this package.\n")
5434                                         return 0, myfavorites
5435                                 db_keys = list(bindb._aux_cache_keys)
5436                                 metadata = izip(db_keys, bindb.aux_get(mykey, db_keys))
5437                                 pkg = Package(type_name="binary", root_config=root_config,
5438                                         cpv=mykey, built=True, metadata=metadata,
5439                                         onlydeps=onlydeps)
5440                                 self._pkg_cache[pkg] = pkg
5441                                 args.append(PackageArg(arg=x, package=pkg,
5442                                         root_config=root_config))
5443                         elif ext==".ebuild":
5444                                 ebuild_path = portage.util.normalize_path(os.path.abspath(x))
5445                                 pkgdir = os.path.dirname(ebuild_path)
5446                                 tree_root = os.path.dirname(os.path.dirname(pkgdir))
5447                                 cp = pkgdir[len(tree_root)+1:]
5448                                 e = portage.exception.PackageNotFound(
5449                                         ("%s is not in a valid portage tree " + \
5450                                         "hierarchy or does not exist") % x)
5451                                 if not portage.isvalidatom(cp):
5452                                         raise e
5453                                 cat = portage.catsplit(cp)[0]
5454                                 mykey = cat + "/" + os.path.basename(ebuild_path[:-7])
5455                                 if not portage.isvalidatom("="+mykey):
5456                                         raise e
5457                                 ebuild_path = portdb.findname(mykey)
5458                                 if ebuild_path:
5459                                         if ebuild_path != os.path.join(os.path.realpath(tree_root),
5460                                                 cp, os.path.basename(ebuild_path)):
5461                                                 print colorize("BAD", "\n*** You need to adjust PORTDIR or PORTDIR_OVERLAY to emerge this package.\n")
5462                                                 return 0, myfavorites
5463                                         if mykey not in portdb.xmatch(
5464                                                 "match-visible", portage.dep_getkey(mykey)):
5465                                                 print colorize("BAD", "\n*** You are emerging a masked package. It is MUCH better to use")
5466                                                 print colorize("BAD", "*** /etc/portage/package.* to accomplish this. See portage(5) man")
5467                                                 print colorize("BAD", "*** page for details.")
5468                                                 countdown(int(self.settings["EMERGE_WARNING_DELAY"]),
5469                                                         "Continuing...")
5470                                 else:
5471                                         raise portage.exception.PackageNotFound(
5472                                                 "%s is not in a valid portage tree hierarchy or does not exist" % x)
5473                                 db_keys = list(portdb._aux_cache_keys)
5474                                 metadata = izip(db_keys, portdb.aux_get(mykey, db_keys))
5475                                 pkg = Package(type_name="ebuild", root_config=root_config,
5476                                         cpv=mykey, metadata=metadata, onlydeps=onlydeps)
5477                                 pkgsettings.setcpv(pkg)
5478                                 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
5479                                 pkg.metadata['CHOST'] = pkgsettings.get('CHOST', '')
5480                                 self._pkg_cache[pkg] = pkg
5481                                 args.append(PackageArg(arg=x, package=pkg,
5482                                         root_config=root_config))
5483                         elif x.startswith(os.path.sep):
5484                                 if not x.startswith(myroot):
5485                                         portage.writemsg(("\n\n!!! '%s' does not start with" + \
5486                                                 " $ROOT.\n") % x, noiselevel=-1)
5487                                         return 0, []
5488                                 # Queue these up since it's most efficient to handle
5489                                 # multiple files in a single iter_owners() call.
5490                                 lookup_owners.append(x)
5491                         else:
5492                                 if x in ("system", "world"):
5493                                         x = SETPREFIX + x
5494                                 if x.startswith(SETPREFIX):
5495                                         s = x[len(SETPREFIX):]
5496                                         if s not in sets:
5497                                                 raise portage.exception.PackageSetNotFound(s)
5498                                         if s in self._sets:
5499                                                 continue
5500                                         # Recursively expand sets so that containment tests in
5501                                         # self._get_parent_sets() properly match atoms in nested
5502                                         # sets (like if world contains system).
5503                                         expanded_set = InternalPackageSet(
5504                                                 initial_atoms=getSetAtoms(s))
5505                                         self._sets[s] = expanded_set
5506                                         args.append(SetArg(arg=x, set=expanded_set,
5507                                                 root_config=root_config))
5508                                         continue
5509                                 if not is_valid_package_atom(x):
5510                                         portage.writemsg("\n\n!!! '%s' is not a valid package atom.\n" % x,
5511                                                 noiselevel=-1)
5512                                         portage.writemsg("!!! Please check ebuild(5) for full details.\n")
5513                                         portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
5514                                         return (0,[])
5515                                 # Don't expand categories or old-style virtuals here unless
5516                                 # necessary. Expansion of old-style virtuals here causes at
5517                                 # least the following problems:
5518                                 #   1) It's more difficult to determine which set(s) an atom
5519                                 #      came from, if any.
5520                                 #   2) It takes away freedom from the resolver to choose other
5521                                 #      possible expansions when necessary.
5522                                 if "/" in x:
5523                                         args.append(AtomArg(arg=x, atom=x,
5524                                                 root_config=root_config))
5525                                         continue
5526                                 expanded_atoms = self._dep_expand(root_config, x)
5527                                 installed_cp_set = set()
5528                                 for atom in expanded_atoms:
5529                                         atom_cp = portage.dep_getkey(atom)
5530                                         if vardb.cp_list(atom_cp):
5531                                                 installed_cp_set.add(atom_cp)
5532                                 if len(expanded_atoms) > 1 and len(installed_cp_set) == 1:
5533                                         installed_cp = iter(installed_cp_set).next()
5534                                         expanded_atoms = [atom for atom in expanded_atoms \
5535                                                 if portage.dep_getkey(atom) == installed_cp]
5536
5537                                 if len(expanded_atoms) > 1:
5538                                         print
5539                                         print
5540                                         ambiguous_package_name(x, expanded_atoms, root_config,
5541                                                 self.spinner, self.myopts)
5542                                         return False, myfavorites
5543                                 if expanded_atoms:
5544                                         atom = expanded_atoms[0]
5545                                 else:
5546                                         null_atom = insert_category_into_atom(x, "null")
5547                                         null_cp = portage.dep_getkey(null_atom)
5548                                         cat, atom_pn = portage.catsplit(null_cp)
5549                                         virts_p = root_config.settings.get_virts_p().get(atom_pn)
5550                                         if virts_p:
5551                                                 # Allow the depgraph to choose which virtual.
5552                                                 atom = insert_category_into_atom(x, "virtual")
5553                                         else:
5554                                                 atom = insert_category_into_atom(x, "null")
5555
5556                                 args.append(AtomArg(arg=x, atom=atom,
5557                                         root_config=root_config))
5558
5559                 if lookup_owners:
5560                         relative_paths = []
5561                         search_for_multiple = False
5562                         if len(lookup_owners) > 1:
5563                                 search_for_multiple = True
5564
5565                         for x in lookup_owners:
5566                                 if not search_for_multiple and os.path.isdir(x):
5567                                         search_for_multiple = True
5568                                 relative_paths.append(x[len(myroot):])
5569
5570                         owners = set()
5571                         for pkg, relative_path in \
5572                                 real_vardb._owners.iter_owners(relative_paths):
5573                                 owners.add(pkg.mycpv)
5574                                 if not search_for_multiple:
5575                                         break
5576
5577                         if not owners:
5578                                 portage.writemsg(("\n\n!!! '%s' is not claimed " + \
5579                                         "by any package.\n") % lookup_owners[0], noiselevel=-1)
5580                                 return 0, []
5581
5582                         for cpv in owners:
5583                                 slot = vardb.aux_get(cpv, ["SLOT"])[0]
5584                                 if not slot:
5585                                         # portage now masks packages with missing slot, but it's
5586                                         # possible that one was installed by an older version
5587                                         atom = portage.cpv_getkey(cpv)
5588                                 else:
5589                                         atom = "%s:%s" % (portage.cpv_getkey(cpv), slot)
5590                                 args.append(AtomArg(arg=atom, atom=atom,
5591                                         root_config=root_config))
5592
5593                 if "--update" in self.myopts:
5594                         # In some cases, the greedy slots behavior can pull in a slot that
5595                         # the user would want to uninstall due to it being blocked by a
5596                         # newer version in a different slot. Therefore, it's necessary to
5597                         # detect and discard any that should be uninstalled. Each time
5598                         # that arguments are updated, package selections are repeated in
5599                         # order to ensure consistency with the current arguments:
5600                         #
5601                         #  1) Initialize args
5602                         #  2) Select packages and generate initial greedy atoms
5603                         #  3) Update args with greedy atoms
5604                         #  4) Select packages and generate greedy atoms again, while
5605                         #     accounting for any blockers between selected packages
5606                         #  5) Update args with revised greedy atoms
5607
5608                         self._set_args(args)
5609                         greedy_args = []
5610                         for arg in args:
5611                                 greedy_args.append(arg)
5612                                 if not isinstance(arg, AtomArg):
5613                                         continue
5614                                 for atom in self._greedy_slots(arg.root_config, arg.atom):
5615                                         greedy_args.append(
5616                                                 AtomArg(arg=arg.arg, atom=atom,
5617                                                         root_config=arg.root_config))
5618
5619                         self._set_args(greedy_args)
5620                         del greedy_args
5621
5622                         # Revise greedy atoms, accounting for any blockers
5623                         # between selected packages.
5624                         revised_greedy_args = []
5625                         for arg in args:
5626                                 revised_greedy_args.append(arg)
5627                                 if not isinstance(arg, AtomArg):
5628                                         continue
5629                                 for atom in self._greedy_slots(arg.root_config, arg.atom,
5630                                         blocker_lookahead=True):
5631                                         revised_greedy_args.append(
5632                                                 AtomArg(arg=arg.arg, atom=atom,
5633                                                         root_config=arg.root_config))
5634                         args = revised_greedy_args
5635                         del revised_greedy_args
5636
5637                 self._set_args(args)
5638
5639                 myfavorites = set(myfavorites)
5640                 for arg in args:
5641                         if isinstance(arg, (AtomArg, PackageArg)):
5642                                 myfavorites.add(arg.atom)
5643                         elif isinstance(arg, SetArg):
5644                                 myfavorites.add(arg.arg)
5645                 myfavorites = list(myfavorites)
5646
5647                 pprovideddict = pkgsettings.pprovideddict
5648                 if debug:
5649                         portage.writemsg("\n", noiselevel=-1)
5650                 # Order needs to be preserved since a feature of --nodeps
5651                 # is to allow the user to force a specific merge order.
5652                 args.reverse()
5653                 while args:
5654                         arg = args.pop()
5655                         for atom in arg.set:
5656                                 self.spinner.update()
5657                                 dep = Dependency(atom=atom, onlydeps=onlydeps,
5658                                         root=myroot, parent=arg)
5659                                 atom_cp = portage.dep_getkey(atom)
5660                                 try:
5661                                         pprovided = pprovideddict.get(portage.dep_getkey(atom))
5662                                         if pprovided and portage.match_from_list(atom, pprovided):
5663                                                 # A provided package has been specified on the command line.
5664                                                 self._pprovided_args.append((arg, atom))
5665                                                 continue
5666                                         if isinstance(arg, PackageArg):
5667                                                 if not self._add_pkg(arg.package, dep) or \
5668                                                         not self._create_graph():
5669                                                         sys.stderr.write(("\n\n!!! Problem resolving " + \
5670                                                                 "dependencies for %s\n") % arg.arg)
5671                                                         return 0, myfavorites
5672                                                 continue
5673                                         if debug:
5674                                                 portage.writemsg("      Arg: %s\n     Atom: %s\n" % \
5675                                                         (arg, atom), noiselevel=-1)
5676                                         pkg, existing_node = self._select_package(
5677                                                 myroot, atom, onlydeps=onlydeps)
5678                                         if not pkg:
5679                                                 if not (isinstance(arg, SetArg) and \
5680                                                         arg.name in ("system", "world")):
5681                                                         self._unsatisfied_deps_for_display.append(
5682                                                                 ((myroot, atom), {}))
5683                                                         return 0, myfavorites
5684                                                 self._missing_args.append((arg, atom))
5685                                                 continue
5686                                         if atom_cp != pkg.cp:
5687                                                 # For old-style virtuals, we need to repeat the
5688                                                 # package.provided check against the selected package.
5689                                                 expanded_atom = atom.replace(atom_cp, pkg.cp)
5690                                                 pprovided = pprovideddict.get(pkg.cp)
5691                                                 if pprovided and \
5692                                                         portage.match_from_list(expanded_atom, pprovided):
5693                                                         # A provided package has been
5694                                                         # specified on the command line.
5695                                                         self._pprovided_args.append((arg, atom))
5696                                                         continue
5697                                         if pkg.installed and "selective" not in self.myparams:
5698                                                 self._unsatisfied_deps_for_display.append(
5699                                                         ((myroot, atom), {}))
5700                                                 # Previous behavior was to bail out in this case, but
5701                                                 # since the dep is satisfied by the installed package,
5702                                                 # it's more friendly to continue building the graph
5703                                                 # and just show a warning message. Therefore, only bail
5704                                                 # out here if the atom is not from either the system or
5705                                                 # world set.
5706                                                 if not (isinstance(arg, SetArg) and \
5707                                                         arg.name in ("system", "world")):
5708                                                         return 0, myfavorites
5709
5710                                         # Add the selected package to the graph as soon as possible
5711                                         # so that later dep_check() calls can use it as feedback
5712                                         # for making more consistent atom selections.
5713                                         if not self._add_pkg(pkg, dep):
5714                                                 if isinstance(arg, SetArg):
5715                                                         sys.stderr.write(("\n\n!!! Problem resolving " + \
5716                                                                 "dependencies for %s from %s\n") % \
5717                                                                 (atom, arg.arg))
5718                                                 else:
5719                                                         sys.stderr.write(("\n\n!!! Problem resolving " + \
5720                                                                 "dependencies for %s\n") % atom)
5721                                                 return 0, myfavorites
5722
5723                                 except portage.exception.MissingSignature, e:
5724                                         portage.writemsg("\n\n!!! A missing gpg signature is preventing portage from calculating the\n")
5725                                         portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
5726                                         portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
5727                                         portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
5728                                         portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
5729                                         return 0, myfavorites
5730                                 except portage.exception.InvalidSignature, e:
5731                                         portage.writemsg("\n\n!!! An invalid gpg signature is preventing portage from calculating the\n")
5732                                         portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
5733                                         portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
5734                                         portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
5735                                         portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
5736                                         return 0, myfavorites
5737                                 except SystemExit, e:
5738                                         raise # Needed else can't exit
5739                                 except Exception, e:
5740                                         print >> sys.stderr, "\n\n!!! Problem in '%s' dependencies." % atom
5741                                         print >> sys.stderr, "!!!", str(e), getattr(e, "__module__", None)
5742                                         raise
5743
5744                 # Now that the root packages have been added to the graph,
5745                 # process the dependencies.
5746                 if not self._create_graph():
5747                         return 0, myfavorites
5748
5749                 missing=0
5750                 if "--usepkgonly" in self.myopts:
5751                         for xs in self.digraph.all_nodes():
5752                                 if not isinstance(xs, Package):
5753                                         continue
5754                                 if len(xs) >= 4 and xs[0] != "binary" and xs[3] == "merge":
5755                                         if missing == 0:
5756                                                 print
5757                                         missing += 1
5758                                         print "Missing binary for:",xs[2]
5759
5760                 try:
5761                         self.altlist()
5762                 except self._unknown_internal_error:
5763                         return False, myfavorites
5764
5765                 # We're true here unless we are missing binaries.
5766                 return (not missing,myfavorites)
5767
5768         def _set_args(self, args):
5769                 """
5770                 Create the "args" package set from atoms and packages given as
5771                 arguments. This method can be called multiple times if necessary.
5772                 The package selection cache is automatically invalidated, since
5773                 arguments influence package selections.
5774                 """
5775                 args_set = self._sets["args"]
5776                 args_set.clear()
5777                 for arg in args:
5778                         if not isinstance(arg, (AtomArg, PackageArg)):
5779                                 continue
5780                         atom = arg.atom
5781                         if atom in args_set:
5782                                 continue
5783                         args_set.add(atom)
5784
5785                 self._set_atoms.clear()
5786                 self._set_atoms.update(chain(*self._sets.itervalues()))
5787                 atom_arg_map = self._atom_arg_map
5788                 atom_arg_map.clear()
5789                 for arg in args:
5790                         for atom in arg.set:
5791                                 atom_key = (atom, arg.root_config.root)
5792                                 refs = atom_arg_map.get(atom_key)
5793                                 if refs is None:
5794                                         refs = []
5795                                         atom_arg_map[atom_key] = refs
5796                                         if arg not in refs:
5797                                                 refs.append(arg)
5798
5799                 # Invalidate the package selection cache, since
5800                 # arguments influence package selections.
5801                 self._highest_pkg_cache.clear()
5802                 for trees in self._filtered_trees.itervalues():
5803                         trees["porttree"].dbapi._clear_cache()
5804
5805         def _greedy_slots(self, root_config, atom, blocker_lookahead=False):
5806                 """
5807                 Return a list of slot atoms corresponding to installed slots that
5808                 differ from the slot of the highest visible match. When
5809                 blocker_lookahead is True, slot atoms that would trigger a blocker
5810                 conflict are automatically discarded, potentially allowing automatic
5811                 uninstallation of older slots when appropriate.
5812                 """
5813                 highest_pkg, in_graph = self._select_package(root_config.root, atom)
5814                 if highest_pkg is None:
5815                         return []
5816                 vardb = root_config.trees["vartree"].dbapi
5817                 slots = set()
5818                 for cpv in vardb.match(atom):
5819                         # don't mix new virtuals with old virtuals
5820                         if portage.cpv_getkey(cpv) == highest_pkg.cp:
5821                                 slots.add(vardb.aux_get(cpv, ["SLOT"])[0])
5822
5823                 slots.add(highest_pkg.metadata["SLOT"])
5824                 if len(slots) == 1:
5825                         return []
5826                 greedy_pkgs = []
5827                 slots.remove(highest_pkg.metadata["SLOT"])
5828                 while slots:
5829                         slot = slots.pop()
5830                         slot_atom = portage.dep.Atom("%s:%s" % (highest_pkg.cp, slot))
5831                         pkg, in_graph = self._select_package(root_config.root, slot_atom)
5832                         if pkg is not None and \
5833                                 pkg.cp == highest_pkg.cp and pkg < highest_pkg:
5834                                 greedy_pkgs.append(pkg)
5835                 if not greedy_pkgs:
5836                         return []
5837                 if not blocker_lookahead:
5838                         return [pkg.slot_atom for pkg in greedy_pkgs]
5839
5840                 blockers = {}
5841                 blocker_dep_keys = ["DEPEND", "PDEPEND", "RDEPEND"]
5842                 for pkg in greedy_pkgs + [highest_pkg]:
5843                         dep_str = " ".join(pkg.metadata[k] for k in blocker_dep_keys)
5844                         try:
5845                                 atoms = self._select_atoms(
5846                                         pkg.root, dep_str, pkg.use.enabled,
5847                                         parent=pkg, strict=True)
5848                         except portage.exception.InvalidDependString:
5849                                 continue
5850                         blocker_atoms = (x for x in atoms if x.blocker)
5851                         blockers[pkg] = InternalPackageSet(initial_atoms=blocker_atoms)
5852
5853                 if highest_pkg not in blockers:
5854                         return []
5855
5856                 # filter packages with invalid deps
5857                 greedy_pkgs = [pkg for pkg in greedy_pkgs if pkg in blockers]
5858
5859                 # filter packages that conflict with highest_pkg
5860                 greedy_pkgs = [pkg for pkg in greedy_pkgs if not \
5861                         (blockers[highest_pkg].findAtomForPackage(pkg) or \
5862                         blockers[pkg].findAtomForPackage(highest_pkg))]
5863
5864                 if not greedy_pkgs:
5865                         return []
5866
5867                 # If two packages conflict, discard the lower version.
5868                 discard_pkgs = set()
5869                 greedy_pkgs.sort(reverse=True)
5870                 for i in xrange(len(greedy_pkgs) - 1):
5871                         pkg1 = greedy_pkgs[i]
5872                         if pkg1 in discard_pkgs:
5873                                 continue
5874                         for j in xrange(i + 1, len(greedy_pkgs)):
5875                                 pkg2 = greedy_pkgs[j]
5876                                 if pkg2 in discard_pkgs:
5877                                         continue
5878                                 if blockers[pkg1].findAtomForPackage(pkg2) or \
5879                                         blockers[pkg2].findAtomForPackage(pkg1):
5880                                         # pkg1 > pkg2
5881                                         discard_pkgs.add(pkg2)
5882
5883                 return [pkg.slot_atom for pkg in greedy_pkgs \
5884                         if pkg not in discard_pkgs]
5885
5886         def _select_atoms_from_graph(self, *pargs, **kwargs):
5887                 """
5888                 Prefer atoms matching packages that have already been
5889                 added to the graph or those that are installed and have
5890                 not been scheduled for replacement.
5891                 """
5892                 kwargs["trees"] = self._graph_trees
5893                 return self._select_atoms_highest_available(*pargs, **kwargs)
5894
5895         def _select_atoms_highest_available(self, root, depstring,
5896                 myuse=None, parent=None, strict=True, trees=None, priority=None):
5897                 """This will raise InvalidDependString if necessary. If trees is
5898                 None then self._filtered_trees is used."""
5899                 pkgsettings = self.pkgsettings[root]
5900                 if trees is None:
5901                         trees = self._filtered_trees
5902                 if not getattr(priority, "buildtime", False):
5903                         # The parent should only be passed to dep_check() for buildtime
5904                         # dependencies since that's the only case when it's appropriate
5905                         # to trigger the circular dependency avoidance code which uses it.
5906                         # It's important not to trigger the same circular dependency
5907                         # avoidance code for runtime dependencies since it's not needed
5908                         # and it can promote an incorrect package choice.
5909                         parent = None
5910                 if True:
5911                         try:
5912                                 if parent is not None:
5913                                         trees[root]["parent"] = parent
5914                                 if not strict:
5915                                         portage.dep._dep_check_strict = False
5916                                 mycheck = portage.dep_check(depstring, None,
5917                                         pkgsettings, myuse=myuse,
5918                                         myroot=root, trees=trees)
5919                         finally:
5920                                 if parent is not None:
5921                                         trees[root].pop("parent")
5922                                 portage.dep._dep_check_strict = True
5923                         if not mycheck[0]:
5924                                 raise portage.exception.InvalidDependString(mycheck[1])
5925                         selected_atoms = mycheck[1]
5926                 return selected_atoms
5927
5928         def _show_unsatisfied_dep(self, root, atom, myparent=None, arg=None):
5929                 atom = portage.dep.Atom(atom)
5930                 atom_set = InternalPackageSet(initial_atoms=(atom,))
5931                 atom_without_use = atom
5932                 if atom.use:
5933                         atom_without_use = portage.dep.remove_slot(atom)
5934                         if atom.slot:
5935                                 atom_without_use += ":" + atom.slot
5936                         atom_without_use = portage.dep.Atom(atom_without_use)
5937                 xinfo = '"%s"' % atom
5938                 if arg:
5939                         xinfo='"%s"' % arg
5940                 # Discard null/ from failed cpv_expand category expansion.
5941                 xinfo = xinfo.replace("null/", "")
5942                 masked_packages = []
5943                 missing_use = []
5944                 masked_pkg_instances = set()
5945                 missing_licenses = []
5946                 have_eapi_mask = False
5947                 pkgsettings = self.pkgsettings[root]
5948                 implicit_iuse = pkgsettings._get_implicit_iuse()
5949                 root_config = self.roots[root]
5950                 portdb = self.roots[root].trees["porttree"].dbapi
5951                 dbs = self._filtered_trees[root]["dbs"]
5952                 for db, pkg_type, built, installed, db_keys in dbs:
5953                         if installed:
5954                                 continue
5955                         match = db.match
5956                         if hasattr(db, "xmatch"):
5957                                 cpv_list = db.xmatch("match-all", atom_without_use)
5958                         else:
5959                                 cpv_list = db.match(atom_without_use)
5960                         # descending order
5961                         cpv_list.reverse()
5962                         for cpv in cpv_list:
5963                                 metadata, mreasons  = get_mask_info(root_config, cpv,
5964                                         pkgsettings, db, pkg_type, built, installed, db_keys)
5965                                 if metadata is not None:
5966                                         pkg = Package(built=built, cpv=cpv,
5967                                                 installed=installed, metadata=metadata,
5968                                                 root_config=root_config)
5969                                         if pkg.cp != atom.cp:
5970                                                 # A cpv can be returned from dbapi.match() as an
5971                                                 # old-style virtual match even in cases when the
5972                                                 # package does not actually PROVIDE the virtual.
5973                                                 # Filter out any such false matches here.
5974                                                 if not atom_set.findAtomForPackage(pkg):
5975                                                         continue
5976                                         if mreasons:
5977                                                 masked_pkg_instances.add(pkg)
5978                                         if atom.use:
5979                                                 missing_use.append(pkg)
5980                                                 if not mreasons:
5981                                                         continue
5982                                 masked_packages.append(
5983                                         (root_config, pkgsettings, cpv, metadata, mreasons))
5984
5985                 missing_use_reasons = []
5986                 missing_iuse_reasons = []
5987                 for pkg in missing_use:
5988                         use = pkg.use.enabled
5989                         iuse = implicit_iuse.union(re.escape(x) for x in pkg.iuse.all)
5990                         iuse_re = re.compile("^(%s)$" % "|".join(iuse))
5991                         missing_iuse = []
5992                         for x in atom.use.required:
5993                                 if iuse_re.match(x) is None:
5994                                         missing_iuse.append(x)
5995                         mreasons = []
5996                         if missing_iuse:
5997                                 mreasons.append("Missing IUSE: %s" % " ".join(missing_iuse))
5998                                 missing_iuse_reasons.append((pkg, mreasons))
5999                         else:
6000                                 need_enable = sorted(atom.use.enabled.difference(use))
6001                                 need_disable = sorted(atom.use.disabled.intersection(use))
6002                                 if need_enable or need_disable:
6003                                         changes = []
6004                                         changes.extend(colorize("red", "+" + x) \
6005                                                 for x in need_enable)
6006                                         changes.extend(colorize("blue", "-" + x) \
6007                                                 for x in need_disable)
6008                                         mreasons.append("Change USE: %s" % " ".join(changes))
6009                                         missing_use_reasons.append((pkg, mreasons))
6010
6011                 unmasked_use_reasons = [(pkg, mreasons) for (pkg, mreasons) \
6012                         in missing_use_reasons if pkg not in masked_pkg_instances]
6013
6014                 unmasked_iuse_reasons = [(pkg, mreasons) for (pkg, mreasons) \
6015                         in missing_iuse_reasons if pkg not in masked_pkg_instances]
6016
6017                 show_missing_use = False
6018                 if unmasked_use_reasons:
6019                         # Only show the latest version.
6020                         show_missing_use = unmasked_use_reasons[:1]
6021                 elif unmasked_iuse_reasons:
6022                         if missing_use_reasons:
6023                                 # All packages with required IUSE are masked,
6024                                 # so display a normal masking message.
6025                                 pass
6026                         else:
6027                                 show_missing_use = unmasked_iuse_reasons
6028
6029                 if show_missing_use:
6030                         print "\nemerge: there are no ebuilds built with USE flags to satisfy "+green(xinfo)+"."
6031                         print "!!! One of the following packages is required to complete your request:"
6032                         for pkg, mreasons in show_missing_use:
6033                                 print "- "+pkg.cpv+" ("+", ".join(mreasons)+")"
6034
6035                 elif masked_packages:
6036                         print "\n!!! " + \
6037                                 colorize("BAD", "All ebuilds that could satisfy ") + \
6038                                 colorize("INFORM", xinfo) + \
6039                                 colorize("BAD", " have been masked.")
6040                         print "!!! One of the following masked packages is required to complete your request:"
6041                         have_eapi_mask = show_masked_packages(masked_packages)
6042                         if have_eapi_mask:
6043                                 print
6044                                 msg = ("The current version of portage supports " + \
6045                                         "EAPI '%s'. You must upgrade to a newer version" + \
6046                                         " of portage before EAPI masked packages can" + \
6047                                         " be installed.") % portage.const.EAPI
6048                                 from textwrap import wrap
6049                                 for line in wrap(msg, 75):
6050                                         print line
6051                         print
6052                         show_mask_docs()
6053                 else:
6054                         print "\nemerge: there are no ebuilds to satisfy "+green(xinfo)+"."
6055
6056                 # Show parent nodes and the argument that pulled them in.
6057                 traversed_nodes = set()
6058                 node = myparent
6059                 msg = []
6060                 while node is not None:
6061                         traversed_nodes.add(node)
6062                         msg.append('(dependency required by "%s" [%s])' % \
6063                                 (colorize('INFORM', str(node.cpv)), node.type_name))
6064                         # When traversing to parents, prefer arguments over packages
6065                         # since arguments are root nodes. Never traverse the same
6066                         # package twice, in order to prevent an infinite loop.
6067                         selected_parent = None
6068                         for parent in self.digraph.parent_nodes(node):
6069                                 if isinstance(parent, DependencyArg):
6070                                         msg.append('(dependency required by "%s" [argument])' % \
6071                                                 (colorize('INFORM', str(parent))))
6072                                         selected_parent = None
6073                                         break
6074                                 if parent not in traversed_nodes:
6075                                         selected_parent = parent
6076                         node = selected_parent
6077                 for line in msg:
6078                         print line
6079
6080                 print
6081
6082         def _select_pkg_highest_available(self, root, atom, onlydeps=False):
6083                 cache_key = (root, atom, onlydeps)
6084                 ret = self._highest_pkg_cache.get(cache_key)
6085                 if ret is not None:
6086                         pkg, existing = ret
6087                         if pkg and not existing:
6088                                 existing = self._slot_pkg_map[root].get(pkg.slot_atom)
6089                                 if existing and existing == pkg:
6090                                         # Update the cache to reflect that the
6091                                         # package has been added to the graph.
6092                                         ret = pkg, pkg
6093                                         self._highest_pkg_cache[cache_key] = ret
6094                         return ret
6095                 ret = self._select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)
6096                 self._highest_pkg_cache[cache_key] = ret
6097                 pkg, existing = ret
6098                 if pkg is not None:
6099                         settings = pkg.root_config.settings
6100                         if visible(settings, pkg) and not (pkg.installed and \
6101                                 settings._getMissingKeywords(pkg.cpv, pkg.metadata)):
6102                                 pkg.root_config.visible_pkgs.cpv_inject(pkg)
6103                 return ret
6104
6105         def _select_pkg_highest_available_imp(self, root, atom, onlydeps=False):
6106                 root_config = self.roots[root]
6107                 pkgsettings = self.pkgsettings[root]
6108                 dbs = self._filtered_trees[root]["dbs"]
6109                 vardb = self.roots[root].trees["vartree"].dbapi
6110                 portdb = self.roots[root].trees["porttree"].dbapi
6111                 # List of acceptable packages, ordered by type preference.
6112                 matched_packages = []
6113                 highest_version = None
6114                 if not isinstance(atom, portage.dep.Atom):
6115                         atom = portage.dep.Atom(atom)
6116                 atom_cp = atom.cp
6117                 atom_set = InternalPackageSet(initial_atoms=(atom,))
6118                 existing_node = None
6119                 myeb = None
6120                 usepkgonly = "--usepkgonly" in self.myopts
6121                 empty = "empty" in self.myparams
6122                 selective = "selective" in self.myparams
6123                 reinstall = False
6124                 noreplace = "--noreplace" in self.myopts
6125                 # Behavior of the "selective" parameter depends on
6126                 # whether or not a package matches an argument atom.
6127                 # If an installed package provides an old-style
6128                 # virtual that is no longer provided by an available
6129                 # package, the installed package may match an argument
6130                 # atom even though none of the available packages do.
6131                 # Therefore, "selective" logic does not consider
6132                 # whether or not an installed package matches an
6133                 # argument atom. It only considers whether or not
6134                 # available packages match argument atoms, which is
6135                 # represented by the found_available_arg flag.
6136                 found_available_arg = False
6137                 for find_existing_node in True, False:
6138                         if existing_node:
6139                                 break
6140                         for db, pkg_type, built, installed, db_keys in dbs:
6141                                 if existing_node:
6142                                         break
6143                                 if installed and not find_existing_node:
6144                                         want_reinstall = reinstall or empty or \
6145                                                 (found_available_arg and not selective)
6146                                         if want_reinstall and matched_packages:
6147                                                 continue
6148                                 if hasattr(db, "xmatch"):
6149                                         cpv_list = db.xmatch("match-all", atom)
6150                                 else:
6151                                         cpv_list = db.match(atom)
6152
6153                                 # USE=multislot can make an installed package appear as if
6154                                 # it doesn't satisfy a slot dependency. Rebuilding the ebuild
6155                                 # won't do any good as long as USE=multislot is enabled since
6156                                 # the newly built package still won't have the expected slot.
6157                                 # Therefore, assume that such SLOT dependencies are already
6158                                 # satisfied rather than forcing a rebuild.
6159                                 if installed and not cpv_list and atom.slot:
6160                                         for cpv in db.match(atom.cp):
6161                                                 slot_available = False
6162                                                 for other_db, other_type, other_built, \
6163                                                         other_installed, other_keys in dbs:
6164                                                         try:
6165                                                                 if atom.slot == \
6166                                                                         other_db.aux_get(cpv, ["SLOT"])[0]:
6167                                                                         slot_available = True
6168                                                                         break
6169                                                         except KeyError:
6170                                                                 pass
6171                                                 if not slot_available:
6172                                                         continue
6173                                                 inst_pkg = self._pkg(cpv, "installed",
6174                                                         root_config, installed=installed)
6175                                                 # Remove the slot from the atom and verify that
6176                                                 # the package matches the resulting atom.
6177                                                 atom_without_slot = portage.dep.remove_slot(atom)
6178                                                 if atom.use:
6179                                                         atom_without_slot += str(atom.use)
6180                                                 atom_without_slot = portage.dep.Atom(atom_without_slot)
6181                                                 if portage.match_from_list(
6182                                                         atom_without_slot, [inst_pkg]):
6183                                                         cpv_list = [inst_pkg.cpv]
6184                                                 break
6185
6186                                 if not cpv_list:
6187                                         continue
6188                                 pkg_status = "merge"
6189                                 if installed or onlydeps:
6190                                         pkg_status = "nomerge"
6191                                 # descending order
6192                                 cpv_list.reverse()
6193                                 for cpv in cpv_list:
6194                                         # Make --noreplace take precedence over --newuse.
6195                                         if not installed and noreplace and \
6196                                                 cpv in vardb.match(atom):
6197                                                 # If the installed version is masked, it may
6198                                                 # be necessary to look at lower versions,
6199                                                 # in case there is a visible downgrade.
6200                                                 continue
6201                                         reinstall_for_flags = None
6202                                         cache_key = (pkg_type, root, cpv, pkg_status)
6203                                         calculated_use = True
6204                                         pkg = self._pkg_cache.get(cache_key)
6205                                         if pkg is None:
6206                                                 calculated_use = False
6207                                                 try:
6208                                                         metadata = izip(db_keys, db.aux_get(cpv, db_keys))
6209                                                 except KeyError:
6210                                                         continue
6211                                                 pkg = Package(built=built, cpv=cpv,
6212                                                         installed=installed, metadata=metadata,
6213                                                         onlydeps=onlydeps, root_config=root_config,
6214                                                         type_name=pkg_type)
6215                                                 metadata = pkg.metadata
6216                                                 if not built:
6217                                                         metadata['CHOST'] = pkgsettings.get('CHOST', '')
6218                                                 if not built and ("?" in metadata["LICENSE"] or \
6219                                                         "?" in metadata["PROVIDE"]):
6220                                                         # This is avoided whenever possible because
6221                                                         # it's expensive. It only needs to be done here
6222                                                         # if it has an effect on visibility.
6223                                                         pkgsettings.setcpv(pkg)
6224                                                         metadata["USE"] = pkgsettings["PORTAGE_USE"]
6225                                                         calculated_use = True
6226                                                 self._pkg_cache[pkg] = pkg
6227
6228                                         if not installed or (built and matched_packages):
6229                                                 # Only enforce visibility on installed packages
6230                                                 # if there is at least one other visible package
6231                                                 # available. By filtering installed masked packages
6232                                                 # here, packages that have been masked since they
6233                                                 # were installed can be automatically downgraded
6234                                                 # to an unmasked version.
6235                                                 try:
6236                                                         if not visible(pkgsettings, pkg):
6237                                                                 continue
6238                                                 except portage.exception.InvalidDependString:
6239                                                         if not installed:
6240                                                                 continue
6241
6242                                                 # Enable upgrade or downgrade to a version
6243                                                 # with visible KEYWORDS when the installed
6244                                                 # version is masked by KEYWORDS, but never
6245                                                 # reinstall the same exact version only due
6246                                                 # to a KEYWORDS mask.
6247                                                 if built and matched_packages:
6248
6249                                                         different_version = None
6250                                                         for avail_pkg in matched_packages:
6251                                                                 if not portage.dep.cpvequal(
6252                                                                         pkg.cpv, avail_pkg.cpv):
6253                                                                         different_version = avail_pkg
6254                                                                         break
6255                                                         if different_version is not None:
6256
6257                                                                 if installed and \
6258                                                                         pkgsettings._getMissingKeywords(
6259                                                                         pkg.cpv, pkg.metadata):
6260                                                                         continue
6261
6262                                                                 # If the ebuild no longer exists or it's
6263                                                                 # keywords have been dropped, reject built
6264                                                                 # instances (installed or binary).
6265                                                                 # If --usepkgonly is enabled, assume that
6266                                                                 # the ebuild status should be ignored.
6267                                                                 if not usepkgonly:
6268                                                                         try:
6269                                                                                 pkg_eb = self._pkg(
6270                                                                                         pkg.cpv, "ebuild", root_config)
6271                                                                         except portage.exception.PackageNotFound:
6272                                                                                 continue
6273                                                                         else:
6274                                                                                 if not visible(pkgsettings, pkg_eb):
6275                                                                                         continue
6276
6277                                         if not pkg.built and not calculated_use:
6278                                                 # This is avoided whenever possible because
6279                                                 # it's expensive.
6280                                                 pkgsettings.setcpv(pkg)
6281                                                 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
6282
6283                                         if pkg.cp != atom.cp:
6284                                                 # A cpv can be returned from dbapi.match() as an
6285                                                 # old-style virtual match even in cases when the
6286                                                 # package does not actually PROVIDE the virtual.
6287                                                 # Filter out any such false matches here.
6288                                                 if not atom_set.findAtomForPackage(pkg):
6289                                                         continue
6290
6291                                         myarg = None
6292                                         if root == self.target_root:
6293                                                 try:
6294                                                         # Ebuild USE must have been calculated prior
6295                                                         # to this point, in case atoms have USE deps.
6296                                                         myarg = self._iter_atoms_for_pkg(pkg).next()
6297                                                 except StopIteration:
6298                                                         pass
6299                                                 except portage.exception.InvalidDependString:
6300                                                         if not installed:
6301                                                                 # masked by corruption
6302                                                                 continue
6303                                         if not installed and myarg:
6304                                                 found_available_arg = True
6305
6306                                         if atom.use and not pkg.built:
6307                                                 use = pkg.use.enabled
6308                                                 if atom.use.enabled.difference(use):
6309                                                         continue
6310                                                 if atom.use.disabled.intersection(use):
6311                                                         continue
6312                                         if pkg.cp == atom_cp:
6313                                                 if highest_version is None:
6314                                                         highest_version = pkg
6315                                                 elif pkg > highest_version:
6316                                                         highest_version = pkg
6317                                         # At this point, we've found the highest visible
6318                                         # match from the current repo. Any lower versions
6319                                         # from this repo are ignored, so this so the loop
6320                                         # will always end with a break statement below
6321                                         # this point.
6322                                         if find_existing_node:
6323                                                 e_pkg = self._slot_pkg_map[root].get(pkg.slot_atom)
6324                                                 if not e_pkg:
6325                                                         break
6326                                                 if portage.dep.match_from_list(atom, [e_pkg]):
6327                                                         if highest_version and \
6328                                                                 e_pkg.cp == atom_cp and \
6329                                                                 e_pkg < highest_version and \
6330                                                                 e_pkg.slot_atom != highest_version.slot_atom:
6331                                                                 # There is a higher version available in a
6332                                                                 # different slot, so this existing node is
6333                                                                 # irrelevant.
6334                                                                 pass
6335                                                         else:
6336                                                                 matched_packages.append(e_pkg)
6337                                                                 existing_node = e_pkg
6338                                                 break
6339                                         # Compare built package to current config and
6340                                         # reject the built package if necessary.
6341                                         if built and not installed and \
6342                                                 ("--newuse" in self.myopts or \
6343                                                 "--reinstall" in self.myopts):
6344                                                 iuses = pkg.iuse.all
6345                                                 old_use = pkg.use.enabled
6346                                                 if myeb:
6347                                                         pkgsettings.setcpv(myeb)
6348                                                 else:
6349                                                         pkgsettings.setcpv(pkg)
6350                                                 now_use = pkgsettings["PORTAGE_USE"].split()
6351                                                 forced_flags = set()
6352                                                 forced_flags.update(pkgsettings.useforce)
6353                                                 forced_flags.update(pkgsettings.usemask)
6354                                                 cur_iuse = iuses
6355                                                 if myeb and not usepkgonly:
6356                                                         cur_iuse = myeb.iuse.all
6357                                                 if self._reinstall_for_flags(forced_flags,
6358                                                         old_use, iuses,
6359                                                         now_use, cur_iuse):
6360                                                         break
6361                                         # Compare current config to installed package
6362                                         # and do not reinstall if possible.
6363                                         if not installed and \
6364                                                 ("--newuse" in self.myopts or \
6365                                                 "--reinstall" in self.myopts) and \
6366                                                 cpv in vardb.match(atom):
6367                                                 pkgsettings.setcpv(pkg)
6368                                                 forced_flags = set()
6369                                                 forced_flags.update(pkgsettings.useforce)
6370                                                 forced_flags.update(pkgsettings.usemask)
6371                                                 old_use = vardb.aux_get(cpv, ["USE"])[0].split()
6372                                                 old_iuse = set(filter_iuse_defaults(
6373                                                         vardb.aux_get(cpv, ["IUSE"])[0].split()))
6374                                                 cur_use = pkgsettings["PORTAGE_USE"].split()
6375                                                 cur_iuse = pkg.iuse.all
6376                                                 reinstall_for_flags = \
6377                                                         self._reinstall_for_flags(
6378                                                         forced_flags, old_use, old_iuse,
6379                                                         cur_use, cur_iuse)
6380                                                 if reinstall_for_flags:
6381                                                         reinstall = True
6382                                         if not built:
6383                                                 myeb = pkg
6384                                         matched_packages.append(pkg)
6385                                         if reinstall_for_flags:
6386                                                 self._reinstall_nodes[pkg] = \
6387                                                         reinstall_for_flags
6388                                         break
6389
6390                 if not matched_packages:
6391                         return None, None
6392
6393                 if "--debug" in self.myopts:
6394                         for pkg in matched_packages:
6395                                 portage.writemsg("%s %s\n" % \
6396                                         ((pkg.type_name + ":").rjust(10), pkg.cpv), noiselevel=-1)
6397
6398                 # Filter out any old-style virtual matches if they are
6399                 # mixed with new-style virtual matches.
6400                 cp = portage.dep_getkey(atom)
6401                 if len(matched_packages) > 1 and \
6402                         "virtual" == portage.catsplit(cp)[0]:
6403                         for pkg in matched_packages:
6404                                 if pkg.cp != cp:
6405                                         continue
6406                                 # Got a new-style virtual, so filter
6407                                 # out any old-style virtuals.
6408                                 matched_packages = [pkg for pkg in matched_packages \
6409                                         if pkg.cp == cp]
6410                                 break
6411
6412                 if len(matched_packages) > 1:
6413                         bestmatch = portage.best(
6414                                 [pkg.cpv for pkg in matched_packages])
6415                         matched_packages = [pkg for pkg in matched_packages \
6416                                 if portage.dep.cpvequal(pkg.cpv, bestmatch)]
6417
6418                 # ordered by type preference ("ebuild" type is the last resort)
6419                 return  matched_packages[-1], existing_node
6420
6421         def _select_pkg_from_graph(self, root, atom, onlydeps=False):
6422                 """
6423                 Select packages that have already been added to the graph or
6424                 those that are installed and have not been scheduled for
6425                 replacement.
6426                 """
6427                 graph_db = self._graph_trees[root]["porttree"].dbapi
6428                 matches = graph_db.match_pkgs(atom)
6429                 if not matches:
6430                         return None, None
6431                 pkg = matches[-1] # highest match
6432                 in_graph = self._slot_pkg_map[root].get(pkg.slot_atom)
6433                 return pkg, in_graph
6434
6435         def _complete_graph(self):
6436                 """
6437                 Add any deep dependencies of required sets (args, system, world) that
6438                 have not been pulled into the graph yet. This ensures that the graph
6439                 is consistent such that initially satisfied deep dependencies are not
6440                 broken in the new graph. Initially unsatisfied dependencies are
6441                 irrelevant since we only want to avoid breaking dependencies that are
6442                 intially satisfied.
6443
6444                 Since this method can consume enough time to disturb users, it is
6445                 currently only enabled by the --complete-graph option.
6446                 """
6447                 if "--buildpkgonly" in self.myopts or \
6448                         "recurse" not in self.myparams:
6449                         return 1
6450
6451                 if "complete" not in self.myparams:
6452                         # Skip this to avoid consuming enough time to disturb users.
6453                         return 1
6454
6455                 # Put the depgraph into a mode that causes it to only
6456                 # select packages that have already been added to the
6457                 # graph or those that are installed and have not been
6458                 # scheduled for replacement. Also, toggle the "deep"
6459                 # parameter so that all dependencies are traversed and
6460                 # accounted for.
6461                 self._select_atoms = self._select_atoms_from_graph
6462                 self._select_package = self._select_pkg_from_graph
6463                 already_deep = "deep" in self.myparams
6464                 if not already_deep:
6465                         self.myparams.add("deep")
6466
6467                 for root in self.roots:
6468                         required_set_names = self._required_set_names.copy()
6469                         if root == self.target_root and \
6470                                 (already_deep or "empty" in self.myparams):
6471                                 required_set_names.difference_update(self._sets)
6472                         if not required_set_names and not self._ignored_deps:
6473                                 continue
6474                         root_config = self.roots[root]
6475                         setconfig = root_config.setconfig
6476                         args = []
6477                         # Reuse existing SetArg instances when available.
6478                         for arg in self.digraph.root_nodes():
6479                                 if not isinstance(arg, SetArg):
6480                                         continue
6481                                 if arg.root_config != root_config:
6482                                         continue
6483                                 if arg.name in required_set_names:
6484                                         args.append(arg)
6485                                         required_set_names.remove(arg.name)
6486                         # Create new SetArg instances only when necessary.
6487                         for s in required_set_names:
6488                                 expanded_set = InternalPackageSet(
6489                                         initial_atoms=setconfig.getSetAtoms(s))
6490                                 atom = SETPREFIX + s
6491                                 args.append(SetArg(arg=atom, set=expanded_set,
6492                                         root_config=root_config))
6493                         vardb = root_config.trees["vartree"].dbapi
6494                         for arg in args:
6495                                 for atom in arg.set:
6496                                         self._dep_stack.append(
6497                                                 Dependency(atom=atom, root=root, parent=arg))
6498                         if self._ignored_deps:
6499                                 self._dep_stack.extend(self._ignored_deps)
6500                                 self._ignored_deps = []
6501                         if not self._create_graph(allow_unsatisfied=True):
6502                                 return 0
6503                         # Check the unsatisfied deps to see if any initially satisfied deps
6504                         # will become unsatisfied due to an upgrade. Initially unsatisfied
6505                         # deps are irrelevant since we only want to avoid breaking deps
6506                         # that are initially satisfied.
6507                         while self._unsatisfied_deps:
6508                                 dep = self._unsatisfied_deps.pop()
6509                                 matches = vardb.match_pkgs(dep.atom)
6510                                 if not matches:
6511                                         self._initially_unsatisfied_deps.append(dep)
6512                                         continue
6513                                 # An scheduled installation broke a deep dependency.
6514                                 # Add the installed package to the graph so that it
6515                                 # will be appropriately reported as a slot collision
6516                                 # (possibly solvable via backtracking).
6517                                 pkg = matches[-1] # highest match
6518                                 if not self._add_pkg(pkg, dep):
6519                                         return 0
6520                                 if not self._create_graph(allow_unsatisfied=True):
6521                                         return 0
6522                 return 1
6523
6524         def _pkg(self, cpv, type_name, root_config, installed=False):
6525                 """
6526                 Get a package instance from the cache, or create a new
6527                 one if necessary. Raises KeyError from aux_get if it
6528                 failures for some reason (package does not exist or is
6529                 corrupt).
6530                 """
6531                 operation = "merge"
6532                 if installed:
6533                         operation = "nomerge"
6534                 pkg = self._pkg_cache.get(
6535                         (type_name, root_config.root, cpv, operation))
6536                 if pkg is None:
6537                         tree_type = self.pkg_tree_map[type_name]
6538                         db = root_config.trees[tree_type].dbapi
6539                         db_keys = list(self._trees_orig[root_config.root][
6540                                 tree_type].dbapi._aux_cache_keys)
6541                         try:
6542                                 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
6543                         except KeyError:
6544                                 raise portage.exception.PackageNotFound(cpv)
6545                         pkg = Package(cpv=cpv, metadata=metadata,
6546                                 root_config=root_config, installed=installed)
6547                         if type_name == "ebuild":
6548                                 settings = self.pkgsettings[root_config.root]
6549                                 settings.setcpv(pkg)
6550                                 pkg.metadata["USE"] = settings["PORTAGE_USE"]
6551                                 pkg.metadata['CHOST'] = settings.get('CHOST', '')
6552                         self._pkg_cache[pkg] = pkg
6553                 return pkg
6554
6555         def validate_blockers(self):
6556                 """Remove any blockers from the digraph that do not match any of the
6557                 packages within the graph.  If necessary, create hard deps to ensure
6558                 correct merge order such that mutually blocking packages are never
6559                 installed simultaneously."""
6560
6561                 if "--buildpkgonly" in self.myopts or \
6562                         "--nodeps" in self.myopts:
6563                         return True
6564
6565                 #if "deep" in self.myparams:
6566                 if True:
6567                         # Pull in blockers from all installed packages that haven't already
6568                         # been pulled into the depgraph.  This is not enabled by default
6569                         # due to the performance penalty that is incurred by all the
6570                         # additional dep_check calls that are required.
6571
6572                         dep_keys = ["DEPEND","RDEPEND","PDEPEND"]
6573                         for myroot in self.trees:
6574                                 vardb = self.trees[myroot]["vartree"].dbapi
6575                                 portdb = self.trees[myroot]["porttree"].dbapi
6576                                 pkgsettings = self.pkgsettings[myroot]
6577                                 final_db = self.mydbapi[myroot]
6578
6579                                 blocker_cache = BlockerCache(myroot, vardb)
6580                                 stale_cache = set(blocker_cache)
6581                                 for pkg in vardb:
6582                                         cpv = pkg.cpv
6583                                         stale_cache.discard(cpv)
6584                                         pkg_in_graph = self.digraph.contains(pkg)
6585
6586                                         # Check for masked installed packages. Only warn about
6587                                         # packages that are in the graph in order to avoid warning
6588                                         # about those that will be automatically uninstalled during
6589                                         # the merge process or by --depclean.
6590                                         if pkg in final_db:
6591                                                 if pkg_in_graph and not visible(pkgsettings, pkg):
6592                                                         self._masked_installed.add(pkg)
6593
6594                                         blocker_atoms = None
6595                                         blockers = None
6596                                         if pkg_in_graph:
6597                                                 blockers = []
6598                                                 try:
6599                                                         blockers.extend(
6600                                                                 self._blocker_parents.child_nodes(pkg))
6601                                                 except KeyError:
6602                                                         pass
6603                                                 try:
6604                                                         blockers.extend(
6605                                                                 self._irrelevant_blockers.child_nodes(pkg))
6606                                                 except KeyError:
6607                                                         pass
6608                                         if blockers is not None:
6609                                                 blockers = set(str(blocker.atom) \
6610                                                         for blocker in blockers)
6611
6612                                         # If this node has any blockers, create a "nomerge"
6613                                         # node for it so that they can be enforced.
6614                                         self.spinner.update()
6615                                         blocker_data = blocker_cache.get(cpv)
6616                                         if blocker_data is not None and \
6617                                                 blocker_data.counter != long(pkg.metadata["COUNTER"]):
6618                                                 blocker_data = None
6619
6620                                         # If blocker data from the graph is available, use
6621                                         # it to validate the cache and update the cache if
6622                                         # it seems invalid.
6623                                         if blocker_data is not None and \
6624                                                 blockers is not None:
6625                                                 if not blockers.symmetric_difference(
6626                                                         blocker_data.atoms):
6627                                                         continue
6628                                                 blocker_data = None
6629
6630                                         if blocker_data is None and \
6631                                                 blockers is not None:
6632                                                 # Re-use the blockers from the graph.
6633                                                 blocker_atoms = sorted(blockers)
6634                                                 counter = long(pkg.metadata["COUNTER"])
6635                                                 blocker_data = \
6636                                                         blocker_cache.BlockerData(counter, blocker_atoms)
6637                                                 blocker_cache[pkg.cpv] = blocker_data
6638                                                 continue
6639
6640                                         if blocker_data:
6641                                                 blocker_atoms = blocker_data.atoms
6642                                         else:
6643                                                 # Use aux_get() to trigger FakeVartree global
6644                                                 # updates on *DEPEND when appropriate.
6645                                                 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
6646                                                 # It is crucial to pass in final_db here in order to
6647                                                 # optimize dep_check calls by eliminating atoms via
6648                                                 # dep_wordreduce and dep_eval calls.
6649                                                 try:
6650                                                         portage.dep._dep_check_strict = False
6651                                                         try:
6652                                                                 success, atoms = portage.dep_check(depstr,
6653                                                                         final_db, pkgsettings, myuse=pkg.use.enabled,
6654                                                                         trees=self._graph_trees, myroot=myroot)
6655                                                         except Exception, e:
6656                                                                 if isinstance(e, SystemExit):
6657                                                                         raise
6658                                                                 # This is helpful, for example, if a ValueError
6659                                                                 # is thrown from cpv_expand due to multiple
6660                                                                 # matches (this can happen if an atom lacks a
6661                                                                 # category).
6662                                                                 show_invalid_depstring_notice(
6663                                                                         pkg, depstr, str(e))
6664                                                                 del e
6665                                                                 raise
6666                                                 finally:
6667                                                         portage.dep._dep_check_strict = True
6668                                                 if not success:
6669                                                         replacement_pkg = final_db.match_pkgs(pkg.slot_atom)
6670                                                         if replacement_pkg and \
6671                                                                 replacement_pkg[0].operation == "merge":
6672                                                                 # This package is being replaced anyway, so
6673                                                                 # ignore invalid dependencies so as not to
6674                                                                 # annoy the user too much (otherwise they'd be
6675                                                                 # forced to manually unmerge it first).
6676                                                                 continue
6677                                                         show_invalid_depstring_notice(pkg, depstr, atoms)
6678                                                         return False
6679                                                 blocker_atoms = [myatom for myatom in atoms \
6680                                                         if myatom.startswith("!")]
6681                                                 blocker_atoms.sort()
6682                                                 counter = long(pkg.metadata["COUNTER"])
6683                                                 blocker_cache[cpv] = \
6684                                                         blocker_cache.BlockerData(counter, blocker_atoms)
6685                                         if blocker_atoms:
6686                                                 try:
6687                                                         for atom in blocker_atoms:
6688                                                                 blocker = Blocker(atom=portage.dep.Atom(atom),
6689                                                                         eapi=pkg.metadata["EAPI"], root=myroot)
6690                                                                 self._blocker_parents.add(blocker, pkg)
6691                                                 except portage.exception.InvalidAtom, e:
6692                                                         depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
6693                                                         show_invalid_depstring_notice(
6694                                                                 pkg, depstr, "Invalid Atom: %s" % (e,))
6695                                                         return False
6696                                 for cpv in stale_cache:
6697                                         del blocker_cache[cpv]
6698                                 blocker_cache.flush()
6699                                 del blocker_cache
6700
6701                 # Discard any "uninstall" tasks scheduled by previous calls
6702                 # to this method, since those tasks may not make sense given
6703                 # the current graph state.
6704                 previous_uninstall_tasks = self._blocker_uninstalls.leaf_nodes()
6705                 if previous_uninstall_tasks:
6706                         self._blocker_uninstalls = digraph()
6707                         self.digraph.difference_update(previous_uninstall_tasks)
6708
6709                 for blocker in self._blocker_parents.leaf_nodes():
6710                         self.spinner.update()
6711                         root_config = self.roots[blocker.root]
6712                         virtuals = root_config.settings.getvirtuals()
6713                         myroot = blocker.root
6714                         initial_db = self.trees[myroot]["vartree"].dbapi
6715                         final_db = self.mydbapi[myroot]
6716                         
6717                         provider_virtual = False
6718                         if blocker.cp in virtuals and \
6719                                 not self._have_new_virt(blocker.root, blocker.cp):
6720                                 provider_virtual = True
6721
6722                         if provider_virtual:
6723                                 atoms = []
6724                                 for provider_entry in virtuals[blocker.cp]:
6725                                         provider_cp = \
6726                                                 portage.dep_getkey(provider_entry)
6727                                         atoms.append(blocker.atom.replace(
6728                                                 blocker.cp, provider_cp))
6729                         else:
6730                                 atoms = [blocker.atom]
6731
6732                         blocked_initial = []
6733                         for atom in atoms:
6734                                 blocked_initial.extend(initial_db.match_pkgs(atom))
6735
6736                         blocked_final = []
6737                         for atom in atoms:
6738                                 blocked_final.extend(final_db.match_pkgs(atom))
6739
6740                         if not blocked_initial and not blocked_final:
6741                                 parent_pkgs = self._blocker_parents.parent_nodes(blocker)
6742                                 self._blocker_parents.remove(blocker)
6743                                 # Discard any parents that don't have any more blockers.
6744                                 for pkg in parent_pkgs:
6745                                         self._irrelevant_blockers.add(blocker, pkg)
6746                                         if not self._blocker_parents.child_nodes(pkg):
6747                                                 self._blocker_parents.remove(pkg)
6748                                 continue
6749                         for parent in self._blocker_parents.parent_nodes(blocker):
6750                                 unresolved_blocks = False
6751                                 depends_on_order = set()
6752                                 for pkg in blocked_initial:
6753                                         if pkg.slot_atom == parent.slot_atom:
6754                                                 # TODO: Support blocks within slots in cases where it
6755                                                 # might make sense.  For example, a new version might
6756                                                 # require that the old version be uninstalled at build
6757                                                 # time.
6758                                                 continue
6759                                         if parent.installed:
6760                                                 # Two currently installed packages conflict with
6761                                                 # eachother. Ignore this case since the damage
6762                                                 # is already done and this would be likely to
6763                                                 # confuse users if displayed like a normal blocker.
6764                                                 continue
6765
6766                                         self._blocked_pkgs.add(pkg, blocker)
6767
6768                                         if parent.operation == "merge":
6769                                                 # Maybe the blocked package can be replaced or simply
6770                                                 # unmerged to resolve this block.
6771                                                 depends_on_order.add((pkg, parent))
6772                                                 continue
6773                                         # None of the above blocker resolutions techniques apply,
6774                                         # so apparently this one is unresolvable.
6775                                         unresolved_blocks = True
6776                                 for pkg in blocked_final:
6777                                         if pkg.slot_atom == parent.slot_atom:
6778                                                 # TODO: Support blocks within slots.
6779                                                 continue
6780                                         if parent.operation == "nomerge" and \
6781                                                 pkg.operation == "nomerge":
6782                                                 # This blocker will be handled the next time that a
6783                                                 # merge of either package is triggered.
6784                                                 continue
6785
6786                                         self._blocked_pkgs.add(pkg, blocker)
6787
6788                                         # Maybe the blocking package can be
6789                                         # unmerged to resolve this block.
6790                                         if parent.operation == "merge" and pkg.installed:
6791                                                 depends_on_order.add((pkg, parent))
6792                                                 continue
6793                                         elif parent.operation == "nomerge":
6794                                                 depends_on_order.add((parent, pkg))
6795                                                 continue
6796                                         # None of the above blocker resolutions techniques apply,
6797                                         # so apparently this one is unresolvable.
6798                                         unresolved_blocks = True
6799
6800                                 # Make sure we don't unmerge any package that have been pulled
6801                                 # into the graph.
6802                                 if not unresolved_blocks and depends_on_order:
6803                                         for inst_pkg, inst_task in depends_on_order:
6804                                                 if self.digraph.contains(inst_pkg) and \
6805                                                         self.digraph.parent_nodes(inst_pkg):
6806                                                         unresolved_blocks = True
6807                                                         break
6808
6809                                 if not unresolved_blocks and depends_on_order:
6810                                         for inst_pkg, inst_task in depends_on_order:
6811                                                 uninst_task = Package(built=inst_pkg.built,
6812                                                         cpv=inst_pkg.cpv, installed=inst_pkg.installed,
6813                                                         metadata=inst_pkg.metadata,
6814                                                         operation="uninstall",
6815                                                         root_config=inst_pkg.root_config,
6816                                                         type_name=inst_pkg.type_name)
6817                                                 self._pkg_cache[uninst_task] = uninst_task
6818                                                 # Enforce correct merge order with a hard dep.
6819                                                 self.digraph.addnode(uninst_task, inst_task,
6820                                                         priority=BlockerDepPriority.instance)
6821                                                 # Count references to this blocker so that it can be
6822                                                 # invalidated after nodes referencing it have been
6823                                                 # merged.
6824                                                 self._blocker_uninstalls.addnode(uninst_task, blocker)
6825                                 if not unresolved_blocks and not depends_on_order:
6826                                         self._irrelevant_blockers.add(blocker, parent)
6827                                         self._blocker_parents.remove_edge(blocker, parent)
6828                                         if not self._blocker_parents.parent_nodes(blocker):
6829                                                 self._blocker_parents.remove(blocker)
6830                                         if not self._blocker_parents.child_nodes(parent):
6831                                                 self._blocker_parents.remove(parent)
6832                                 if unresolved_blocks:
6833                                         self._unsolvable_blockers.add(blocker, parent)
6834
6835                 return True
6836
6837         def _accept_blocker_conflicts(self):
6838                 acceptable = False
6839                 for x in ("--buildpkgonly", "--fetchonly",
6840                         "--fetch-all-uri", "--nodeps"):
6841                         if x in self.myopts:
6842                                 acceptable = True
6843                                 break
6844                 return acceptable
6845
6846         def _merge_order_bias(self, mygraph):
6847                 """
6848                 For optimal leaf node selection, promote deep system runtime deps and
6849                 order nodes from highest to lowest overall reference count.
6850                 """
6851
6852                 node_info = {}
6853                 for node in mygraph.order:
6854                         node_info[node] = len(mygraph.parent_nodes(node))
6855                 deep_system_deps = _find_deep_system_runtime_deps(mygraph)
6856
6857                 def cmp_merge_preference(node1, node2):
6858
6859                         if node1.operation == 'uninstall':
6860                                 if node2.operation == 'uninstall':
6861                                         return 0
6862                                 return 1
6863
6864                         if node2.operation == 'uninstall':
6865                                 if node1.operation == 'uninstall':
6866                                         return 0
6867                                 return -1
6868
6869                         node1_sys = node1 in deep_system_deps
6870                         node2_sys = node2 in deep_system_deps
6871                         if node1_sys != node2_sys:
6872                                 if node1_sys:
6873                                         return -1
6874                                 return 1
6875
6876                         return node_info[node2] - node_info[node1]
6877
6878                 mygraph.order.sort(key=cmp_sort_key(cmp_merge_preference))
6879
6880         def altlist(self, reversed=False):
6881
6882                 while self._serialized_tasks_cache is None:
6883                         self._resolve_conflicts()
6884                         try:
6885                                 self._serialized_tasks_cache, self._scheduler_graph = \
6886                                         self._serialize_tasks()
6887                         except self._serialize_tasks_retry:
6888                                 pass
6889
6890                 retlist = self._serialized_tasks_cache[:]
6891                 if reversed:
6892                         retlist.reverse()
6893                 return retlist
6894
6895         def schedulerGraph(self):
6896                 """
6897                 The scheduler graph is identical to the normal one except that
6898                 uninstall edges are reversed in specific cases that require
6899                 conflicting packages to be temporarily installed simultaneously.
6900                 This is intended for use by the Scheduler in it's parallelization
6901                 logic. It ensures that temporary simultaneous installation of
6902                 conflicting packages is avoided when appropriate (especially for
6903                 !!atom blockers), but allowed in specific cases that require it.
6904
6905                 Note that this method calls break_refs() which alters the state of
6906                 internal Package instances such that this depgraph instance should
6907                 not be used to perform any more calculations.
6908                 """
6909                 if self._scheduler_graph is None:
6910                         self.altlist()
6911                 self.break_refs(self._scheduler_graph.order)
6912                 return self._scheduler_graph
6913
6914         def break_refs(self, nodes):
6915                 """
6916                 Take a mergelist like that returned from self.altlist() and
6917                 break any references that lead back to the depgraph. This is
6918                 useful if you want to hold references to packages without
6919                 also holding the depgraph on the heap.
6920                 """
6921                 for node in nodes:
6922                         if hasattr(node, "root_config"):
6923                                 # The FakeVartree references the _package_cache which
6924                                 # references the depgraph. So that Package instances don't
6925                                 # hold the depgraph and FakeVartree on the heap, replace
6926                                 # the RootConfig that references the FakeVartree with the
6927                                 # original RootConfig instance which references the actual
6928                                 # vartree.
6929                                 node.root_config = \
6930                                         self._trees_orig[node.root_config.root]["root_config"]
6931
6932         def _resolve_conflicts(self):
6933                 if not self._complete_graph():
6934                         raise self._unknown_internal_error()
6935
6936                 if not self.validate_blockers():
6937                         raise self._unknown_internal_error()
6938
6939                 if self._slot_collision_info:
6940                         self._process_slot_conflicts()
6941
6942         def _serialize_tasks(self):
6943
6944                 if "--debug" in self.myopts:
6945                         writemsg("\ndigraph:\n\n", noiselevel=-1)
6946                         self.digraph.debug_print()
6947                         writemsg("\n", noiselevel=-1)
6948
6949                 scheduler_graph = self.digraph.copy()
6950                 mygraph=self.digraph.copy()
6951                 # Prune "nomerge" root nodes if nothing depends on them, since
6952                 # otherwise they slow down merge order calculation. Don't remove
6953                 # non-root nodes since they help optimize merge order in some cases
6954                 # such as revdep-rebuild.
6955                 removed_nodes = set()
6956                 while True:
6957                         for node in mygraph.root_nodes():
6958                                 if not isinstance(node, Package) or \
6959                                         node.installed or node.onlydeps:
6960                                         removed_nodes.add(node)
6961                         if removed_nodes:
6962                                 self.spinner.update()
6963                                 mygraph.difference_update(removed_nodes)
6964                         if not removed_nodes:
6965                                 break
6966                         removed_nodes.clear()
6967                 self._merge_order_bias(mygraph)
6968                 def cmp_circular_bias(n1, n2):
6969                         """
6970                         RDEPEND is stronger than PDEPEND and this function
6971                         measures such a strength bias within a circular
6972                         dependency relationship.
6973                         """
6974                         n1_n2_medium = n2 in mygraph.child_nodes(n1,
6975                                 ignore_priority=priority_range.ignore_medium_soft)
6976                         n2_n1_medium = n1 in mygraph.child_nodes(n2,
6977                                 ignore_priority=priority_range.ignore_medium_soft)
6978                         if n1_n2_medium == n2_n1_medium:
6979                                 return 0
6980                         elif n1_n2_medium:
6981                                 return 1
6982                         return -1
6983                 myblocker_uninstalls = self._blocker_uninstalls.copy()
6984                 retlist=[]
6985                 # Contains uninstall tasks that have been scheduled to
6986                 # occur after overlapping blockers have been installed.
6987                 scheduled_uninstalls = set()
6988                 # Contains any Uninstall tasks that have been ignored
6989                 # in order to avoid the circular deps code path. These
6990                 # correspond to blocker conflicts that could not be
6991                 # resolved.
6992                 ignored_uninstall_tasks = set()
6993                 have_uninstall_task = False
6994                 complete = "complete" in self.myparams
6995                 asap_nodes = []
6996
6997                 def get_nodes(**kwargs):
6998                         """
6999                         Returns leaf nodes excluding Uninstall instances
7000                         since those should be executed as late as possible.
7001                         """
7002                         return [node for node in mygraph.leaf_nodes(**kwargs) \
7003                                 if isinstance(node, Package) and \
7004                                         (node.operation != "uninstall" or \
7005                                         node in scheduled_uninstalls)]
7006
7007                 # sys-apps/portage needs special treatment if ROOT="/"
7008                 running_root = self._running_root.root
7009                 from portage.const import PORTAGE_PACKAGE_ATOM
7010                 runtime_deps = InternalPackageSet(
7011                         initial_atoms=[PORTAGE_PACKAGE_ATOM])
7012                 running_portage = self.trees[running_root]["vartree"].dbapi.match_pkgs(
7013                         PORTAGE_PACKAGE_ATOM)
7014                 replacement_portage = self.mydbapi[running_root].match_pkgs(
7015                         PORTAGE_PACKAGE_ATOM)
7016
7017                 if running_portage:
7018                         running_portage = running_portage[0]
7019                 else:
7020                         running_portage = None
7021
7022                 if replacement_portage:
7023                         replacement_portage = replacement_portage[0]
7024                 else:
7025                         replacement_portage = None
7026
7027                 if replacement_portage == running_portage:
7028                         replacement_portage = None
7029
7030                 if replacement_portage is not None:
7031                         # update from running_portage to replacement_portage asap
7032                         asap_nodes.append(replacement_portage)
7033
7034                 if running_portage is not None:
7035                         try:
7036                                 portage_rdepend = self._select_atoms_highest_available(
7037                                         running_root, running_portage.metadata["RDEPEND"],
7038                                         myuse=running_portage.use.enabled,
7039                                         parent=running_portage, strict=False)
7040                         except portage.exception.InvalidDependString, e:
7041                                 portage.writemsg("!!! Invalid RDEPEND in " + \
7042                                         "'%svar/db/pkg/%s/RDEPEND': %s\n" % \
7043                                         (running_root, running_portage.cpv, e), noiselevel=-1)
7044                                 del e
7045                                 portage_rdepend = []
7046                         runtime_deps.update(atom for atom in portage_rdepend \
7047                                 if not atom.startswith("!"))
7048
7049                 def gather_deps(ignore_priority, mergeable_nodes,
7050                         selected_nodes, node):
7051                         """
7052                         Recursively gather a group of nodes that RDEPEND on
7053                         eachother. This ensures that they are merged as a group
7054                         and get their RDEPENDs satisfied as soon as possible.
7055                         """
7056                         if node in selected_nodes:
7057                                 return True
7058                         if node not in mergeable_nodes:
7059                                 return False
7060                         if node == replacement_portage and \
7061                                 mygraph.child_nodes(node,
7062                                 ignore_priority=priority_range.ignore_medium_soft):
7063                                 # Make sure that portage always has all of it's
7064                                 # RDEPENDs installed first.
7065                                 return False
7066                         selected_nodes.add(node)
7067                         for child in mygraph.child_nodes(node,
7068                                 ignore_priority=ignore_priority):
7069                                 if not gather_deps(ignore_priority,
7070                                         mergeable_nodes, selected_nodes, child):
7071                                         return False
7072                         return True
7073
7074                 def ignore_uninst_or_med(priority):
7075                         if priority is BlockerDepPriority.instance:
7076                                 return True
7077                         return priority_range.ignore_medium(priority)
7078
7079                 def ignore_uninst_or_med_soft(priority):
7080                         if priority is BlockerDepPriority.instance:
7081                                 return True
7082                         return priority_range.ignore_medium_soft(priority)
7083
7084                 tree_mode = "--tree" in self.myopts
7085                 # Tracks whether or not the current iteration should prefer asap_nodes
7086                 # if available.  This is set to False when the previous iteration
7087                 # failed to select any nodes.  It is reset whenever nodes are
7088                 # successfully selected.
7089                 prefer_asap = True
7090
7091                 # Controls whether or not the current iteration should drop edges that
7092                 # are "satisfied" by installed packages, in order to solve circular
7093                 # dependencies. The deep runtime dependencies of installed packages are
7094                 # not checked in this case (bug #199856), so it must be avoided
7095                 # whenever possible.
7096                 drop_satisfied = False
7097
7098                 # State of variables for successive iterations that loosen the
7099                 # criteria for node selection.
7100                 #
7101                 # iteration   prefer_asap   drop_satisfied
7102                 # 1           True          False
7103                 # 2           False         False
7104                 # 3           False         True
7105                 #
7106                 # If no nodes are selected on the last iteration, it is due to
7107                 # unresolved blockers or circular dependencies.
7108
7109                 while not mygraph.empty():
7110                         self.spinner.update()
7111                         selected_nodes = None
7112                         ignore_priority = None
7113                         if drop_satisfied or (prefer_asap and asap_nodes):
7114                                 priority_range = DepPrioritySatisfiedRange
7115                         else:
7116                                 priority_range = DepPriorityNormalRange
7117                         if prefer_asap and asap_nodes:
7118                                 # ASAP nodes are merged before their soft deps. Go ahead and
7119                                 # select root nodes here if necessary, since it's typical for
7120                                 # the parent to have been removed from the graph already.
7121                                 asap_nodes = [node for node in asap_nodes \
7122                                         if mygraph.contains(node)]
7123                                 for node in asap_nodes:
7124                                         if not mygraph.child_nodes(node,
7125                                                 ignore_priority=priority_range.ignore_soft):
7126                                                 selected_nodes = [node]
7127                                                 asap_nodes.remove(node)
7128                                                 break
7129                         if not selected_nodes and \
7130                                 not (prefer_asap and asap_nodes):
7131                                 for i in xrange(priority_range.NONE,
7132                                         priority_range.MEDIUM_SOFT + 1):
7133                                         ignore_priority = priority_range.ignore_priority[i]
7134                                         nodes = get_nodes(ignore_priority=ignore_priority)
7135                                         if nodes:
7136                                                 # If there is a mix of uninstall nodes with other
7137                                                 # types, save the uninstall nodes for later since
7138                                                 # sometimes a merge node will render an uninstall
7139                                                 # node unnecessary (due to occupying the same slot),
7140                                                 # and we want to avoid executing a separate uninstall
7141                                                 # task in that case.
7142                                                 if len(nodes) > 1:
7143                                                         good_uninstalls = []
7144                                                         with_some_uninstalls_excluded = []
7145                                                         for node in nodes:
7146                                                                 if node.operation == "uninstall":
7147                                                                         slot_node = self.mydbapi[node.root
7148                                                                                 ].match_pkgs(node.slot_atom)
7149                                                                         if slot_node and \
7150                                                                                 slot_node[0].operation == "merge":
7151                                                                                 continue
7152                                                                         good_uninstalls.append(node)
7153                                                                 with_some_uninstalls_excluded.append(node)
7154                                                         if good_uninstalls:
7155                                                                 nodes = good_uninstalls
7156                                                         elif with_some_uninstalls_excluded:
7157                                                                 nodes = with_some_uninstalls_excluded
7158                                                         else:
7159                                                                 nodes = nodes
7160
7161                                                 if ignore_priority is None and not tree_mode:
7162                                                         # Greedily pop all of these nodes since no
7163                                                         # relationship has been ignored. This optimization
7164                                                         # destroys --tree output, so it's disabled in tree
7165                                                         # mode.
7166                                                         selected_nodes = nodes
7167                                                 else:
7168                                                         # For optimal merge order:
7169                                                         #  * Only pop one node.
7170                                                         #  * Removing a root node (node without a parent)
7171                                                         #    will not produce a leaf node, so avoid it.
7172                                                         #  * It's normal for a selected uninstall to be a
7173                                                         #    root node, so don't check them for parents.
7174                                                         for node in nodes:
7175                                                                 if node.operation == "uninstall" or \
7176                                                                         mygraph.parent_nodes(node):
7177                                                                         selected_nodes = [node]
7178                                                                         break
7179
7180                                                 if selected_nodes:
7181                                                         break
7182
7183                         if not selected_nodes:
7184                                 nodes = get_nodes(ignore_priority=priority_range.ignore_medium)
7185                                 if nodes:
7186                                         mergeable_nodes = set(nodes)
7187                                         if prefer_asap and asap_nodes:
7188                                                 nodes = asap_nodes
7189                                         for i in xrange(priority_range.SOFT,
7190                                                 priority_range.MEDIUM_SOFT + 1):
7191                                                 ignore_priority = priority_range.ignore_priority[i]
7192                                                 for node in nodes:
7193                                                         if not mygraph.parent_nodes(node):
7194                                                                 continue
7195                                                         selected_nodes = set()
7196                                                         if gather_deps(ignore_priority,
7197                                                                 mergeable_nodes, selected_nodes, node):
7198                                                                 break
7199                                                         else:
7200                                                                 selected_nodes = None
7201                                                 if selected_nodes:
7202                                                         break
7203
7204                                         if prefer_asap and asap_nodes and not selected_nodes:
7205                                                 # We failed to find any asap nodes to merge, so ignore
7206                                                 # them for the next iteration.
7207                                                 prefer_asap = False
7208                                                 continue
7209
7210                         if selected_nodes and ignore_priority is not None:
7211                                 # Try to merge ignored medium_soft deps as soon as possible
7212                                 # if they're not satisfied by installed packages.
7213                                 for node in selected_nodes:
7214                                         children = set(mygraph.child_nodes(node))
7215                                         soft = children.difference(
7216                                                 mygraph.child_nodes(node,
7217                                                 ignore_priority=DepPrioritySatisfiedRange.ignore_soft))
7218                                         medium_soft = children.difference(
7219                                                 mygraph.child_nodes(node,
7220                                                         ignore_priority = \
7221                                                         DepPrioritySatisfiedRange.ignore_medium_soft))
7222                                         medium_soft.difference_update(soft)
7223                                         for child in medium_soft:
7224                                                 if child in selected_nodes:
7225                                                         continue
7226                                                 if child in asap_nodes:
7227                                                         continue
7228                                                 asap_nodes.append(child)
7229
7230                         if selected_nodes and len(selected_nodes) > 1:
7231                                 if not isinstance(selected_nodes, list):
7232                                         selected_nodes = list(selected_nodes)
7233                                 selected_nodes.sort(key=cmp_sort_key(cmp_circular_bias))
7234
7235                         if not selected_nodes and not myblocker_uninstalls.is_empty():
7236                                 # An Uninstall task needs to be executed in order to
7237                                 # avoid conflict if possible.
7238
7239                                 if drop_satisfied:
7240                                         priority_range = DepPrioritySatisfiedRange
7241                                 else:
7242                                         priority_range = DepPriorityNormalRange
7243
7244                                 mergeable_nodes = get_nodes(
7245                                         ignore_priority=ignore_uninst_or_med)
7246
7247                                 min_parent_deps = None
7248                                 uninst_task = None
7249                                 for task in myblocker_uninstalls.leaf_nodes():
7250                                         # Do some sanity checks so that system or world packages
7251                                         # don't get uninstalled inappropriately here (only really
7252                                         # necessary when --complete-graph has not been enabled).
7253
7254                                         if task in ignored_uninstall_tasks:
7255                                                 continue
7256
7257                                         if task in scheduled_uninstalls:
7258                                                 # It's been scheduled but it hasn't
7259                                                 # been executed yet due to dependence
7260                                                 # on installation of blocking packages.
7261                                                 continue
7262
7263                                         root_config = self.roots[task.root]
7264                                         inst_pkg = self._pkg_cache[
7265                                                 ("installed", task.root, task.cpv, "nomerge")]
7266
7267                                         if self.digraph.contains(inst_pkg):
7268                                                 continue
7269
7270                                         forbid_overlap = False
7271                                         heuristic_overlap = False
7272                                         for blocker in myblocker_uninstalls.parent_nodes(task):
7273                                                 if blocker.eapi in ("0", "1"):
7274                                                         heuristic_overlap = True
7275                                                 elif blocker.atom.blocker.overlap.forbid:
7276                                                         forbid_overlap = True
7277                                                         break
7278                                         if forbid_overlap and running_root == task.root:
7279                                                 continue
7280
7281                                         if heuristic_overlap and running_root == task.root:
7282                                                 # Never uninstall sys-apps/portage or it's essential
7283                                                 # dependencies, except through replacement.
7284                                                 try:
7285                                                         runtime_dep_atoms = \
7286                                                                 list(runtime_deps.iterAtomsForPackage(task))
7287                                                 except portage.exception.InvalidDependString, e:
7288                                                         portage.writemsg("!!! Invalid PROVIDE in " + \
7289                                                                 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
7290                                                                 (task.root, task.cpv, e), noiselevel=-1)
7291                                                         del e
7292                                                         continue
7293
7294                                                 # Don't uninstall a runtime dep if it appears
7295                                                 # to be the only suitable one installed.
7296                                                 skip = False
7297                                                 vardb = root_config.trees["vartree"].dbapi
7298                                                 for atom in runtime_dep_atoms:
7299                                                         other_version = None
7300                                                         for pkg in vardb.match_pkgs(atom):
7301                                                                 if pkg.cpv == task.cpv and \
7302                                                                         pkg.metadata["COUNTER"] == \
7303                                                                         task.metadata["COUNTER"]:
7304                                                                         continue
7305                                                                 other_version = pkg
7306                                                                 break
7307                                                         if other_version is None:
7308                                                                 skip = True
7309                                                                 break
7310                                                 if skip:
7311                                                         continue
7312
7313                                                 # For packages in the system set, don't take
7314                                                 # any chances. If the conflict can't be resolved
7315                                                 # by a normal replacement operation then abort.
7316                                                 skip = False
7317                                                 try:
7318                                                         for atom in root_config.sets[
7319                                                                 "system"].iterAtomsForPackage(task):
7320                                                                 skip = True
7321                                                                 break
7322                                                 except portage.exception.InvalidDependString, e:
7323                                                         portage.writemsg("!!! Invalid PROVIDE in " + \
7324                                                                 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
7325                                                                 (task.root, task.cpv, e), noiselevel=-1)
7326                                                         del e
7327                                                         skip = True
7328                                                 if skip:
7329                                                         continue
7330
7331                                         # Note that the world check isn't always
7332                                         # necessary since self._complete_graph() will
7333                                         # add all packages from the system and world sets to the
7334                                         # graph. This just allows unresolved conflicts to be
7335                                         # detected as early as possible, which makes it possible
7336                                         # to avoid calling self._complete_graph() when it is
7337                                         # unnecessary due to blockers triggering an abortion.
7338                                         if not complete:
7339                                                 # For packages in the world set, go ahead an uninstall
7340                                                 # when necessary, as long as the atom will be satisfied
7341                                                 # in the final state.
7342                                                 graph_db = self.mydbapi[task.root]
7343                                                 skip = False
7344                                                 try:
7345                                                         for atom in root_config.sets[
7346                                                                 "world"].iterAtomsForPackage(task):
7347                                                                 satisfied = False
7348                                                                 for pkg in graph_db.match_pkgs(atom):
7349                                                                         if pkg == inst_pkg:
7350                                                                                 continue
7351                                                                         satisfied = True
7352                                                                         break
7353                                                                 if not satisfied:
7354                                                                         skip = True
7355                                                                         self._blocked_world_pkgs[inst_pkg] = atom
7356                                                                         break
7357                                                 except portage.exception.InvalidDependString, e:
7358                                                         portage.writemsg("!!! Invalid PROVIDE in " + \
7359                                                                 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
7360                                                                 (task.root, task.cpv, e), noiselevel=-1)
7361                                                         del e
7362                                                         skip = True
7363                                                 if skip:
7364                                                         continue
7365
7366                                         # Check the deps of parent nodes to ensure that
7367                                         # the chosen task produces a leaf node. Maybe
7368                                         # this can be optimized some more to make the
7369                                         # best possible choice, but the current algorithm
7370                                         # is simple and should be near optimal for most
7371                                         # common cases.
7372                                         mergeable_parent = False
7373                                         parent_deps = set()
7374                                         for parent in mygraph.parent_nodes(task):
7375                                                 parent_deps.update(mygraph.child_nodes(parent,
7376                                                         ignore_priority=priority_range.ignore_medium_soft))
7377                                                 if parent in mergeable_nodes and \
7378                                                         gather_deps(ignore_uninst_or_med_soft,
7379                                                         mergeable_nodes, set(), parent):
7380                                                         mergeable_parent = True
7381
7382                                         if not mergeable_parent:
7383                                                 continue
7384
7385                                         parent_deps.remove(task)
7386                                         if min_parent_deps is None or \
7387                                                 len(parent_deps) < min_parent_deps:
7388                                                 min_parent_deps = len(parent_deps)
7389                                                 uninst_task = task
7390
7391                                 if uninst_task is not None:
7392                                         # The uninstall is performed only after blocking
7393                                         # packages have been merged on top of it. File
7394                                         # collisions between blocking packages are detected
7395                                         # and removed from the list of files to be uninstalled.
7396                                         scheduled_uninstalls.add(uninst_task)
7397                                         parent_nodes = mygraph.parent_nodes(uninst_task)
7398
7399                                         # Reverse the parent -> uninstall edges since we want
7400                                         # to do the uninstall after blocking packages have
7401                                         # been merged on top of it.
7402                                         mygraph.remove(uninst_task)
7403                                         for blocked_pkg in parent_nodes:
7404                                                 mygraph.add(blocked_pkg, uninst_task,
7405                                                         priority=BlockerDepPriority.instance)
7406                                                 scheduler_graph.remove_edge(uninst_task, blocked_pkg)
7407                                                 scheduler_graph.add(blocked_pkg, uninst_task,
7408                                                         priority=BlockerDepPriority.instance)
7409
7410                                         # Reset the state variables for leaf node selection and
7411                                         # continue trying to select leaf nodes.
7412                                         prefer_asap = True
7413                                         drop_satisfied = False
7414                                         continue
7415
7416                         if not selected_nodes:
7417                                 # Only select root nodes as a last resort. This case should
7418                                 # only trigger when the graph is nearly empty and the only
7419                                 # remaining nodes are isolated (no parents or children). Since
7420                                 # the nodes must be isolated, ignore_priority is not needed.
7421                                 selected_nodes = get_nodes()
7422
7423                         if not selected_nodes and not drop_satisfied:
7424                                 drop_satisfied = True
7425                                 continue
7426
7427                         if not selected_nodes and not myblocker_uninstalls.is_empty():
7428                                 # If possible, drop an uninstall task here in order to avoid
7429                                 # the circular deps code path. The corresponding blocker will
7430                                 # still be counted as an unresolved conflict.
7431                                 uninst_task = None
7432                                 for node in myblocker_uninstalls.leaf_nodes():
7433                                         try:
7434                                                 mygraph.remove(node)
7435                                         except KeyError:
7436                                                 pass
7437                                         else:
7438                                                 uninst_task = node
7439                                                 ignored_uninstall_tasks.add(node)
7440                                                 break
7441
7442                                 if uninst_task is not None:
7443                                         # Reset the state variables for leaf node selection and
7444                                         # continue trying to select leaf nodes.
7445                                         prefer_asap = True
7446                                         drop_satisfied = False
7447                                         continue
7448
7449                         if not selected_nodes:
7450                                 self._circular_deps_for_display = mygraph
7451                                 raise self._unknown_internal_error()
7452
7453                         # At this point, we've succeeded in selecting one or more nodes, so
7454                         # reset state variables for leaf node selection.
7455                         prefer_asap = True
7456                         drop_satisfied = False
7457
7458                         mygraph.difference_update(selected_nodes)
7459
7460                         for node in selected_nodes:
7461                                 if isinstance(node, Package) and \
7462                                         node.operation == "nomerge":
7463                                         continue
7464
7465                                 # Handle interactions between blockers
7466                                 # and uninstallation tasks.
7467                                 solved_blockers = set()
7468                                 uninst_task = None
7469                                 if isinstance(node, Package) and \
7470                                         "uninstall" == node.operation:
7471                                         have_uninstall_task = True
7472                                         uninst_task = node
7473                                 else:
7474                                         vardb = self.trees[node.root]["vartree"].dbapi
7475                                         previous_cpv = vardb.match(node.slot_atom)
7476                                         if previous_cpv:
7477                                                 # The package will be replaced by this one, so remove
7478                                                 # the corresponding Uninstall task if necessary.
7479                                                 previous_cpv = previous_cpv[0]
7480                                                 uninst_task = \
7481                                                         ("installed", node.root, previous_cpv, "uninstall")
7482                                                 try:
7483                                                         mygraph.remove(uninst_task)
7484                                                 except KeyError:
7485                                                         pass
7486
7487                                 if uninst_task is not None and \
7488                                         uninst_task not in ignored_uninstall_tasks and \
7489                                         myblocker_uninstalls.contains(uninst_task):
7490                                         blocker_nodes = myblocker_uninstalls.parent_nodes(uninst_task)
7491                                         myblocker_uninstalls.remove(uninst_task)
7492                                         # Discard any blockers that this Uninstall solves.
7493                                         for blocker in blocker_nodes:
7494                                                 if not myblocker_uninstalls.child_nodes(blocker):
7495                                                         myblocker_uninstalls.remove(blocker)
7496                                                         solved_blockers.add(blocker)
7497
7498                                 retlist.append(node)
7499
7500                                 if (isinstance(node, Package) and \
7501                                         "uninstall" == node.operation) or \
7502                                         (uninst_task is not None and \
7503                                         uninst_task in scheduled_uninstalls):
7504                                         # Include satisfied blockers in the merge list
7505                                         # since the user might be interested and also
7506                                         # it serves as an indicator that blocking packages
7507                                         # will be temporarily installed simultaneously.
7508                                         for blocker in solved_blockers:
7509                                                 retlist.append(Blocker(atom=blocker.atom,
7510                                                         root=blocker.root, eapi=blocker.eapi,
7511                                                         satisfied=True))
7512
7513                 unsolvable_blockers = set(self._unsolvable_blockers.leaf_nodes())
7514                 for node in myblocker_uninstalls.root_nodes():
7515                         unsolvable_blockers.add(node)
7516
7517                 for blocker in unsolvable_blockers:
7518                         retlist.append(blocker)
7519
7520                 # If any Uninstall tasks need to be executed in order
7521                 # to avoid a conflict, complete the graph with any
7522                 # dependencies that may have been initially
7523                 # neglected (to ensure that unsafe Uninstall tasks
7524                 # are properly identified and blocked from execution).
7525                 if have_uninstall_task and \
7526                         not complete and \
7527                         not unsolvable_blockers:
7528                         self.myparams.add("complete")
7529                         raise self._serialize_tasks_retry("")
7530
7531                 if unsolvable_blockers and \
7532                         not self._accept_blocker_conflicts():
7533                         self._unsatisfied_blockers_for_display = unsolvable_blockers
7534                         self._serialized_tasks_cache = retlist[:]
7535                         self._scheduler_graph = scheduler_graph
7536                         raise self._unknown_internal_error()
7537
7538                 if self._slot_collision_info and \
7539                         not self._accept_blocker_conflicts():
7540                         self._serialized_tasks_cache = retlist[:]
7541                         self._scheduler_graph = scheduler_graph
7542                         raise self._unknown_internal_error()
7543
7544                 return retlist, scheduler_graph
7545
7546         def _show_circular_deps(self, mygraph):
7547                 # No leaf nodes are available, so we have a circular
7548                 # dependency panic situation.  Reduce the noise level to a
7549                 # minimum via repeated elimination of root nodes since they
7550                 # have no parents and thus can not be part of a cycle.
7551                 while True:
7552                         root_nodes = mygraph.root_nodes(
7553                                 ignore_priority=DepPrioritySatisfiedRange.ignore_medium_soft)
7554                         if not root_nodes:
7555                                 break
7556                         mygraph.difference_update(root_nodes)
7557                 # Display the USE flags that are enabled on nodes that are part
7558                 # of dependency cycles in case that helps the user decide to
7559                 # disable some of them.
7560                 display_order = []
7561                 tempgraph = mygraph.copy()
7562                 while not tempgraph.empty():
7563                         nodes = tempgraph.leaf_nodes()
7564                         if not nodes:
7565                                 node = tempgraph.order[0]
7566                         else:
7567                                 node = nodes[0]
7568                         display_order.append(node)
7569                         tempgraph.remove(node)
7570                 display_order.reverse()
7571                 self.myopts.pop("--quiet", None)
7572                 self.myopts.pop("--verbose", None)
7573                 self.myopts["--tree"] = True
7574                 portage.writemsg("\n\n", noiselevel=-1)
7575                 self.display(display_order)
7576                 prefix = colorize("BAD", " * ")
7577                 portage.writemsg("\n", noiselevel=-1)
7578                 portage.writemsg(prefix + "Error: circular dependencies:\n",
7579                         noiselevel=-1)
7580                 portage.writemsg("\n", noiselevel=-1)
7581                 mygraph.debug_print()
7582                 portage.writemsg("\n", noiselevel=-1)
7583                 portage.writemsg(prefix + "Note that circular dependencies " + \
7584                         "can often be avoided by temporarily\n", noiselevel=-1)
7585                 portage.writemsg(prefix + "disabling USE flags that trigger " + \
7586                         "optional dependencies.\n", noiselevel=-1)
7587
7588         def _show_merge_list(self):
7589                 if self._serialized_tasks_cache is not None and \
7590                         not (self._displayed_list and \
7591                         (self._displayed_list == self._serialized_tasks_cache or \
7592                         self._displayed_list == \
7593                                 list(reversed(self._serialized_tasks_cache)))):
7594                         display_list = self._serialized_tasks_cache[:]
7595                         if "--tree" in self.myopts:
7596                                 display_list.reverse()
7597                         self.display(display_list)
7598
7599         def _show_unsatisfied_blockers(self, blockers):
7600                 self._show_merge_list()
7601                 msg = "Error: The above package list contains " + \
7602                         "packages which cannot be installed " + \
7603                         "at the same time on the same system."
7604                 prefix = colorize("BAD", " * ")
7605                 from textwrap import wrap
7606                 portage.writemsg("\n", noiselevel=-1)
7607                 for line in wrap(msg, 70):
7608                         portage.writemsg(prefix + line + "\n", noiselevel=-1)
7609
7610                 # Display the conflicting packages along with the packages
7611                 # that pulled them in. This is helpful for troubleshooting
7612                 # cases in which blockers don't solve automatically and
7613                 # the reasons are not apparent from the normal merge list
7614                 # display.
7615
7616                 conflict_pkgs = {}
7617                 for blocker in blockers:
7618                         for pkg in chain(self._blocked_pkgs.child_nodes(blocker), \
7619                                 self._blocker_parents.parent_nodes(blocker)):
7620                                 parent_atoms = self._parent_atoms.get(pkg)
7621                                 if not parent_atoms:
7622                                         atom = self._blocked_world_pkgs.get(pkg)
7623                                         if atom is not None:
7624                                                 parent_atoms = set([("@world", atom)])
7625                                 if parent_atoms:
7626                                         conflict_pkgs[pkg] = parent_atoms
7627
7628                 if conflict_pkgs:
7629                         # Reduce noise by pruning packages that are only
7630                         # pulled in by other conflict packages.
7631                         pruned_pkgs = set()
7632                         for pkg, parent_atoms in conflict_pkgs.iteritems():
7633                                 relevant_parent = False
7634                                 for parent, atom in parent_atoms:
7635                                         if parent not in conflict_pkgs:
7636                                                 relevant_parent = True
7637                                                 break
7638                                 if not relevant_parent:
7639                                         pruned_pkgs.add(pkg)
7640                         for pkg in pruned_pkgs:
7641                                 del conflict_pkgs[pkg]
7642
7643                 if conflict_pkgs:
7644                         msg = []
7645                         msg.append("\n")
7646                         indent = "  "
7647                         # Max number of parents shown, to avoid flooding the display.
7648                         max_parents = 3
7649                         for pkg, parent_atoms in conflict_pkgs.iteritems():
7650
7651                                 pruned_list = set()
7652
7653                                 # Prefer packages that are not directly involved in a conflict.
7654                                 for parent_atom in parent_atoms:
7655                                         if len(pruned_list) >= max_parents:
7656                                                 break
7657                                         parent, atom = parent_atom
7658                                         if parent not in conflict_pkgs:
7659                                                 pruned_list.add(parent_atom)
7660
7661                                 for parent_atom in parent_atoms:
7662                                         if len(pruned_list) >= max_parents:
7663                                                 break
7664                                         pruned_list.add(parent_atom)
7665
7666                                 omitted_parents = len(parent_atoms) - len(pruned_list)
7667                                 msg.append(indent + "%s pulled in by\n" % pkg)
7668
7669                                 for parent_atom in pruned_list:
7670                                         parent, atom = parent_atom
7671                                         msg.append(2*indent)
7672                                         if isinstance(parent,
7673                                                 (PackageArg, AtomArg)):
7674                                                 # For PackageArg and AtomArg types, it's
7675                                                 # redundant to display the atom attribute.
7676                                                 msg.append(str(parent))
7677                                         else:
7678                                                 # Display the specific atom from SetArg or
7679                                                 # Package types.
7680                                                 msg.append("%s required by %s" % (atom, parent))
7681                                         msg.append("\n")
7682
7683                                 if omitted_parents:
7684                                         msg.append(2*indent)
7685                                         msg.append("(and %d more)\n" % omitted_parents)
7686
7687                                 msg.append("\n")
7688
7689                         sys.stderr.write("".join(msg))
7690                         sys.stderr.flush()
7691
7692                 if "--quiet" not in self.myopts:
7693                         show_blocker_docs_link()
7694
7695         def display(self, mylist, favorites=[], verbosity=None):
7696
7697                 # This is used to prevent display_problems() from
7698                 # redundantly displaying this exact same merge list
7699                 # again via _show_merge_list().
7700                 self._displayed_list = mylist
7701
7702                 if verbosity is None:
7703                         verbosity = ("--quiet" in self.myopts and 1 or \
7704                                 "--verbose" in self.myopts and 3 or 2)
7705                 favorites_set = InternalPackageSet(favorites)
7706                 oneshot = "--oneshot" in self.myopts or \
7707                         "--onlydeps" in self.myopts
7708                 columns = "--columns" in self.myopts
7709                 changelogs=[]
7710                 p=[]
7711                 blockers = []
7712
7713                 counters = PackageCounters()
7714
7715                 if verbosity == 1 and "--verbose" not in self.myopts:
7716                         def create_use_string(*args):
7717                                 return ""
7718                 else:
7719                         def create_use_string(name, cur_iuse, iuse_forced, cur_use,
7720                                 old_iuse, old_use,
7721                                 is_new, reinst_flags,
7722                                 all_flags=(verbosity == 3 or "--quiet" in self.myopts),
7723                                 alphabetical=("--alphabetical" in self.myopts)):
7724                                 enabled = []
7725                                 if alphabetical:
7726                                         disabled = enabled
7727                                         removed = enabled
7728                                 else:
7729                                         disabled = []
7730                                         removed = []
7731                                 cur_iuse = set(cur_iuse)
7732                                 enabled_flags = cur_iuse.intersection(cur_use)
7733                                 removed_iuse = set(old_iuse).difference(cur_iuse)
7734                                 any_iuse = cur_iuse.union(old_iuse)
7735                                 any_iuse = list(any_iuse)
7736                                 any_iuse.sort()
7737                                 for flag in any_iuse:
7738                                         flag_str = None
7739                                         isEnabled = False
7740                                         reinst_flag = reinst_flags and flag in reinst_flags
7741                                         if flag in enabled_flags:
7742                                                 isEnabled = True
7743                                                 if is_new or flag in old_use and \
7744                                                         (all_flags or reinst_flag):
7745                                                         flag_str = red(flag)
7746                                                 elif flag not in old_iuse:
7747                                                         flag_str = yellow(flag) + "%*"
7748                                                 elif flag not in old_use:
7749                                                         flag_str = green(flag) + "*"
7750                                         elif flag in removed_iuse:
7751                                                 if all_flags or reinst_flag:
7752                                                         flag_str = yellow("-" + flag) + "%"
7753                                                         if flag in old_use:
7754                                                                 flag_str += "*"
7755                                                         flag_str = "(" + flag_str + ")"
7756                                                         removed.append(flag_str)
7757                                                 continue
7758                                         else:
7759                                                 if is_new or flag in old_iuse and \
7760                                                         flag not in old_use and \
7761                                                         (all_flags or reinst_flag):
7762                                                         flag_str = blue("-" + flag)
7763                                                 elif flag not in old_iuse:
7764                                                         flag_str = yellow("-" + flag)
7765                                                         if flag not in iuse_forced:
7766                                                                 flag_str += "%"
7767                                                 elif flag in old_use:
7768                                                         flag_str = green("-" + flag) + "*"
7769                                         if flag_str:
7770                                                 if flag in iuse_forced:
7771                                                         flag_str = "(" + flag_str + ")"
7772                                                 if isEnabled:
7773                                                         enabled.append(flag_str)
7774                                                 else:
7775                                                         disabled.append(flag_str)
7776
7777                                 if alphabetical:
7778                                         ret = " ".join(enabled)
7779                                 else:
7780                                         ret = " ".join(enabled + disabled + removed)
7781                                 if ret:
7782                                         ret = '%s="%s" ' % (name, ret)
7783                                 return ret
7784
7785                 repo_display = RepoDisplay(self.roots)
7786
7787                 tree_nodes = []
7788                 display_list = []
7789                 mygraph = self.digraph.copy()
7790
7791                 # If there are any Uninstall instances, add the corresponding
7792                 # blockers to the digraph (useful for --tree display).
7793
7794                 executed_uninstalls = set(node for node in mylist \
7795                         if isinstance(node, Package) and node.operation == "unmerge")
7796
7797                 for uninstall in self._blocker_uninstalls.leaf_nodes():
7798                         uninstall_parents = \
7799                                 self._blocker_uninstalls.parent_nodes(uninstall)
7800                         if not uninstall_parents:
7801                                 continue
7802
7803                         # Remove the corresponding "nomerge" node and substitute
7804                         # the Uninstall node.
7805                         inst_pkg = self._pkg_cache[
7806                                 ("installed", uninstall.root, uninstall.cpv, "nomerge")]
7807                         try:
7808                                 mygraph.remove(inst_pkg)
7809                         except KeyError:
7810                                 pass
7811
7812                         try:
7813                                 inst_pkg_blockers = self._blocker_parents.child_nodes(inst_pkg)
7814                         except KeyError:
7815                                 inst_pkg_blockers = []
7816
7817                         # Break the Package -> Uninstall edges.
7818                         mygraph.remove(uninstall)
7819
7820                         # Resolution of a package's blockers
7821                         # depend on it's own uninstallation.
7822                         for blocker in inst_pkg_blockers:
7823                                 mygraph.add(uninstall, blocker)
7824
7825                         # Expand Package -> Uninstall edges into
7826                         # Package -> Blocker -> Uninstall edges.
7827                         for blocker in uninstall_parents:
7828                                 mygraph.add(uninstall, blocker)
7829                                 for parent in self._blocker_parents.parent_nodes(blocker):
7830                                         if parent != inst_pkg:
7831                                                 mygraph.add(blocker, parent)
7832
7833                         # If the uninstall task did not need to be executed because
7834                         # of an upgrade, display Blocker -> Upgrade edges since the
7835                         # corresponding Blocker -> Uninstall edges will not be shown.
7836                         upgrade_node = \
7837                                 self._slot_pkg_map[uninstall.root].get(uninstall.slot_atom)
7838                         if upgrade_node is not None and \
7839                                 uninstall not in executed_uninstalls:
7840                                 for blocker in uninstall_parents:
7841                                         mygraph.add(upgrade_node, blocker)
7842
7843                 unsatisfied_blockers = []
7844                 i = 0
7845                 depth = 0
7846                 shown_edges = set()
7847                 for x in mylist:
7848                         if isinstance(x, Blocker) and not x.satisfied:
7849                                 unsatisfied_blockers.append(x)
7850                                 continue
7851                         graph_key = x
7852                         if "--tree" in self.myopts:
7853                                 depth = len(tree_nodes)
7854                                 while depth and graph_key not in \
7855                                         mygraph.child_nodes(tree_nodes[depth-1]):
7856                                                 depth -= 1
7857                                 if depth:
7858                                         tree_nodes = tree_nodes[:depth]
7859                                         tree_nodes.append(graph_key)
7860                                         display_list.append((x, depth, True))
7861                                         shown_edges.add((graph_key, tree_nodes[depth-1]))
7862                                 else:
7863                                         traversed_nodes = set() # prevent endless circles
7864                                         traversed_nodes.add(graph_key)
7865                                         def add_parents(current_node, ordered):
7866                                                 parent_nodes = None
7867                                                 # Do not traverse to parents if this node is an
7868                                                 # an argument or a direct member of a set that has
7869                                                 # been specified as an argument (system or world).
7870                                                 if current_node not in self._set_nodes:
7871                                                         parent_nodes = mygraph.parent_nodes(current_node)
7872                                                 if parent_nodes:
7873                                                         child_nodes = set(mygraph.child_nodes(current_node))
7874                                                         selected_parent = None
7875                                                         # First, try to avoid a direct cycle.
7876                                                         for node in parent_nodes:
7877                                                                 if not isinstance(node, (Blocker, Package)):
7878                                                                         continue
7879                                                                 if node not in traversed_nodes and \
7880                                                                         node not in child_nodes:
7881                                                                         edge = (current_node, node)
7882                                                                         if edge in shown_edges:
7883                                                                                 continue
7884                                                                         selected_parent = node
7885                                                                         break
7886                                                         if not selected_parent:
7887                                                                 # A direct cycle is unavoidable.
7888                                                                 for node in parent_nodes:
7889                                                                         if not isinstance(node, (Blocker, Package)):
7890                                                                                 continue
7891                                                                         if node not in traversed_nodes:
7892                                                                                 edge = (current_node, node)
7893                                                                                 if edge in shown_edges:
7894                                                                                         continue
7895                                                                                 selected_parent = node
7896                                                                                 break
7897                                                         if selected_parent:
7898                                                                 shown_edges.add((current_node, selected_parent))
7899                                                                 traversed_nodes.add(selected_parent)
7900                                                                 add_parents(selected_parent, False)
7901                                                 display_list.append((current_node,
7902                                                         len(tree_nodes), ordered))
7903                                                 tree_nodes.append(current_node)
7904                                         tree_nodes = []
7905                                         add_parents(graph_key, True)
7906                         else:
7907                                 display_list.append((x, depth, True))
7908                 mylist = display_list
7909                 for x in unsatisfied_blockers:
7910                         mylist.append((x, 0, True))
7911
7912                 last_merge_depth = 0
7913                 for i in xrange(len(mylist)-1,-1,-1):
7914                         graph_key, depth, ordered = mylist[i]
7915                         if not ordered and depth == 0 and i > 0 \
7916                                 and graph_key == mylist[i-1][0] and \
7917                                 mylist[i-1][1] == 0:
7918                                 # An ordered node got a consecutive duplicate when the tree was
7919                                 # being filled in.
7920                                 del mylist[i]
7921                                 continue
7922                         if ordered and graph_key[-1] != "nomerge":
7923                                 last_merge_depth = depth
7924                                 continue
7925                         if depth >= last_merge_depth or \
7926                                 i < len(mylist) - 1 and \
7927                                 depth >= mylist[i+1][1]:
7928                                         del mylist[i]
7929
7930                 from portage import flatten
7931                 from portage.dep import use_reduce, paren_reduce
7932                 # files to fetch list - avoids counting a same file twice
7933                 # in size display (verbose mode)
7934                 myfetchlist=[]
7935
7936                 # Use this set to detect when all the "repoadd" strings are "[0]"
7937                 # and disable the entire repo display in this case.
7938                 repoadd_set = set()
7939
7940                 for mylist_index in xrange(len(mylist)):
7941                         x, depth, ordered = mylist[mylist_index]
7942                         pkg_type = x[0]
7943                         myroot = x[1]
7944                         pkg_key = x[2]
7945                         portdb = self.trees[myroot]["porttree"].dbapi
7946                         bindb  = self.trees[myroot]["bintree"].dbapi
7947                         vardb = self.trees[myroot]["vartree"].dbapi
7948                         vartree = self.trees[myroot]["vartree"]
7949                         pkgsettings = self.pkgsettings[myroot]
7950
7951                         fetch=" "
7952                         indent = " " * depth
7953
7954                         if isinstance(x, Blocker):
7955                                 if x.satisfied:
7956                                         blocker_style = "PKG_BLOCKER_SATISFIED"
7957                                         addl = "%s  %s  " % (colorize(blocker_style, "b"), fetch)
7958                                 else:
7959                                         blocker_style = "PKG_BLOCKER"
7960                                         addl = "%s  %s  " % (colorize(blocker_style, "B"), fetch)
7961                                 if ordered:
7962                                         counters.blocks += 1
7963                                         if x.satisfied:
7964                                                 counters.blocks_satisfied += 1
7965                                 resolved = portage.key_expand(
7966                                         str(x.atom).lstrip("!"), mydb=vardb, settings=pkgsettings)
7967                                 if "--columns" in self.myopts and "--quiet" in self.myopts:
7968                                         addl += " " + colorize(blocker_style, resolved)
7969                                 else:
7970                                         addl = "[%s %s] %s%s" % \
7971                                                 (colorize(blocker_style, "blocks"),
7972                                                 addl, indent, colorize(blocker_style, resolved))
7973                                 block_parents = self._blocker_parents.parent_nodes(x)
7974                                 block_parents = set([pnode[2] for pnode in block_parents])
7975                                 block_parents = ", ".join(block_parents)
7976                                 if resolved!=x[2]:
7977                                         addl += colorize(blocker_style,
7978                                                 " (\"%s\" is blocking %s)") % \
7979                                                 (str(x.atom).lstrip("!"), block_parents)
7980                                 else:
7981                                         addl += colorize(blocker_style,
7982                                                 " (is blocking %s)") % block_parents
7983                                 if isinstance(x, Blocker) and x.satisfied:
7984                                         if columns:
7985                                                 continue
7986                                         p.append(addl)
7987                                 else:
7988                                         blockers.append(addl)
7989                         else:
7990                                 pkg_status = x[3]
7991                                 pkg_merge = ordered and pkg_status == "merge"
7992                                 if not pkg_merge and pkg_status == "merge":
7993                                         pkg_status = "nomerge"
7994                                 built = pkg_type != "ebuild"
7995                                 installed = pkg_type == "installed"
7996                                 pkg = x
7997                                 metadata = pkg.metadata
7998                                 ebuild_path = None
7999                                 repo_name = metadata["repository"]
8000                                 if pkg_type == "ebuild":
8001                                         ebuild_path = portdb.findname(pkg_key)
8002                                         if not ebuild_path: # shouldn't happen
8003                                                 raise portage.exception.PackageNotFound(pkg_key)
8004                                         repo_path_real = os.path.dirname(os.path.dirname(
8005                                                 os.path.dirname(ebuild_path)))
8006                                 else:
8007                                         repo_path_real = portdb.getRepositoryPath(repo_name)
8008                                 pkg_use = list(pkg.use.enabled)
8009                                 try:
8010                                         restrict = flatten(use_reduce(paren_reduce(
8011                                                 pkg.metadata["RESTRICT"]), uselist=pkg_use))
8012                                 except portage.exception.InvalidDependString, e:
8013                                         if not pkg.installed:
8014                                                 show_invalid_depstring_notice(x,
8015                                                         pkg.metadata["RESTRICT"], str(e))
8016                                                 del e
8017                                                 return 1
8018                                         restrict = []
8019                                 if "ebuild" == pkg_type and x[3] != "nomerge" and \
8020                                         "fetch" in restrict:
8021                                         fetch = red("F")
8022                                         if ordered:
8023                                                 counters.restrict_fetch += 1
8024                                         if portdb.fetch_check(pkg_key, pkg_use):
8025                                                 fetch = green("f")
8026                                                 if ordered:
8027                                                         counters.restrict_fetch_satisfied += 1
8028
8029                                 #we need to use "--emptrytree" testing here rather than "empty" param testing because "empty"
8030                                 #param is used for -u, where you still *do* want to see when something is being upgraded.
8031                                 myoldbest = []
8032                                 myinslotlist = None
8033                                 installed_versions = vardb.match(portage.cpv_getkey(pkg_key))
8034                                 if vardb.cpv_exists(pkg_key):
8035                                         addl="  "+yellow("R")+fetch+"  "
8036                                         if ordered:
8037                                                 if pkg_merge:
8038                                                         counters.reinst += 1
8039                                                 elif pkg_status == "uninstall":
8040                                                         counters.uninst += 1
8041                                 # filter out old-style virtual matches
8042                                 elif installed_versions and \
8043                                         portage.cpv_getkey(installed_versions[0]) == \
8044                                         portage.cpv_getkey(pkg_key):
8045                                         myinslotlist = vardb.match(pkg.slot_atom)
8046                                         # If this is the first install of a new-style virtual, we
8047                                         # need to filter out old-style virtual matches.
8048                                         if myinslotlist and \
8049                                                 portage.cpv_getkey(myinslotlist[0]) != \
8050                                                 portage.cpv_getkey(pkg_key):
8051                                                 myinslotlist = None
8052                                         if myinslotlist:
8053                                                 myoldbest = myinslotlist[:]
8054                                                 addl = "   " + fetch
8055                                                 if not portage.dep.cpvequal(pkg_key,
8056                                                         portage.best([pkg_key] + myoldbest)):
8057                                                         # Downgrade in slot
8058                                                         addl += turquoise("U")+blue("D")
8059                                                         if ordered:
8060                                                                 counters.downgrades += 1
8061                                                 else:
8062                                                         # Update in slot
8063                                                         addl += turquoise("U") + " "
8064                                                         if ordered:
8065                                                                 counters.upgrades += 1
8066                                         else:
8067                                                 # New slot, mark it new.
8068                                                 addl = " " + green("NS") + fetch + "  "
8069                                                 myoldbest = vardb.match(portage.cpv_getkey(pkg_key))
8070                                                 if ordered:
8071                                                         counters.newslot += 1
8072
8073                                         if "--changelog" in self.myopts:
8074                                                 inst_matches = vardb.match(pkg.slot_atom)
8075                                                 if inst_matches:
8076                                                         changelogs.extend(self.calc_changelog(
8077                                                                 portdb.findname(pkg_key),
8078                                                                 inst_matches[0], pkg_key))
8079                                 else:
8080                                         addl = " " + green("N") + " " + fetch + "  "
8081                                         if ordered:
8082                                                 counters.new += 1
8083
8084                                 verboseadd = ""
8085                                 repoadd = None
8086
8087                                 if True:
8088                                         # USE flag display
8089                                         forced_flags = set()
8090                                         pkgsettings.setcpv(pkg) # for package.use.{mask,force}
8091                                         forced_flags.update(pkgsettings.useforce)
8092                                         forced_flags.update(pkgsettings.usemask)
8093
8094                                         cur_use = [flag for flag in pkg.use.enabled \
8095                                                 if flag in pkg.iuse.all]
8096                                         cur_iuse = sorted(pkg.iuse.all)
8097
8098                                         if myoldbest and myinslotlist:
8099                                                 previous_cpv = myoldbest[0]
8100                                         else:
8101                                                 previous_cpv = pkg.cpv
8102                                         if vardb.cpv_exists(previous_cpv):
8103                                                 old_iuse, old_use = vardb.aux_get(
8104                                                                 previous_cpv, ["IUSE", "USE"])
8105                                                 old_iuse = list(set(
8106                                                         filter_iuse_defaults(old_iuse.split())))
8107                                                 old_iuse.sort()
8108                                                 old_use = old_use.split()
8109                                                 is_new = False
8110                                         else:
8111                                                 old_iuse = []
8112                                                 old_use = []
8113                                                 is_new = True
8114
8115                                         old_use = [flag for flag in old_use if flag in old_iuse]
8116
8117                                         use_expand = pkgsettings["USE_EXPAND"].lower().split()
8118                                         use_expand.sort()
8119                                         use_expand.reverse()
8120                                         use_expand_hidden = \
8121                                                 pkgsettings["USE_EXPAND_HIDDEN"].lower().split()
8122
8123                                         def map_to_use_expand(myvals, forcedFlags=False,
8124                                                 removeHidden=True):
8125                                                 ret = {}
8126                                                 forced = {}
8127                                                 for exp in use_expand:
8128                                                         ret[exp] = []
8129                                                         forced[exp] = set()
8130                                                         for val in myvals[:]:
8131                                                                 if val.startswith(exp.lower()+"_"):
8132                                                                         if val in forced_flags:
8133                                                                                 forced[exp].add(val[len(exp)+1:])
8134                                                                         ret[exp].append(val[len(exp)+1:])
8135                                                                         myvals.remove(val)
8136                                                 ret["USE"] = myvals
8137                                                 forced["USE"] = [val for val in myvals \
8138                                                         if val in forced_flags]
8139                                                 if removeHidden:
8140                                                         for exp in use_expand_hidden:
8141                                                                 ret.pop(exp, None)
8142                                                 if forcedFlags:
8143                                                         return ret, forced
8144                                                 return ret
8145
8146                                         # Prevent USE_EXPAND_HIDDEN flags from being hidden if they
8147                                         # are the only thing that triggered reinstallation.
8148                                         reinst_flags_map = {}
8149                                         reinstall_for_flags = self._reinstall_nodes.get(pkg)
8150                                         reinst_expand_map = None
8151                                         if reinstall_for_flags:
8152                                                 reinst_flags_map = map_to_use_expand(
8153                                                         list(reinstall_for_flags), removeHidden=False)
8154                                                 for k in list(reinst_flags_map):
8155                                                         if not reinst_flags_map[k]:
8156                                                                 del reinst_flags_map[k]
8157                                                 if not reinst_flags_map.get("USE"):
8158                                                         reinst_expand_map = reinst_flags_map.copy()
8159                                                         reinst_expand_map.pop("USE", None)
8160                                         if reinst_expand_map and \
8161                                                 not set(reinst_expand_map).difference(
8162                                                 use_expand_hidden):
8163                                                 use_expand_hidden = \
8164                                                         set(use_expand_hidden).difference(
8165                                                         reinst_expand_map)
8166
8167                                         cur_iuse_map, iuse_forced = \
8168                                                 map_to_use_expand(cur_iuse, forcedFlags=True)
8169                                         cur_use_map = map_to_use_expand(cur_use)
8170                                         old_iuse_map = map_to_use_expand(old_iuse)
8171                                         old_use_map = map_to_use_expand(old_use)
8172
8173                                         use_expand.sort()
8174                                         use_expand.insert(0, "USE")
8175                                         
8176                                         for key in use_expand:
8177                                                 if key in use_expand_hidden:
8178                                                         continue
8179                                                 verboseadd += create_use_string(key.upper(),
8180                                                         cur_iuse_map[key], iuse_forced[key],
8181                                                         cur_use_map[key], old_iuse_map[key],
8182                                                         old_use_map[key], is_new,
8183                                                         reinst_flags_map.get(key))
8184
8185                                 if verbosity == 3:
8186                                         # size verbose
8187                                         mysize=0
8188                                         if pkg_type == "ebuild" and pkg_merge:
8189                                                 try:
8190                                                         myfilesdict = portdb.getfetchsizes(pkg_key,
8191                                                                 useflags=pkg_use, debug=self.edebug)
8192                                                 except portage.exception.InvalidDependString, e:
8193                                                         src_uri = portdb.aux_get(pkg_key, ["SRC_URI"])[0]
8194                                                         show_invalid_depstring_notice(x, src_uri, str(e))
8195                                                         del e
8196                                                         return 1
8197                                                 if myfilesdict is None:
8198                                                         myfilesdict="[empty/missing/bad digest]"
8199                                                 else:
8200                                                         for myfetchfile in myfilesdict:
8201                                                                 if myfetchfile not in myfetchlist:
8202                                                                         mysize+=myfilesdict[myfetchfile]
8203                                                                         myfetchlist.append(myfetchfile)
8204                                                         if ordered:
8205                                                                 counters.totalsize += mysize
8206                                                 verboseadd += format_size(mysize)
8207
8208                                         # overlay verbose
8209                                         # assign index for a previous version in the same slot
8210                                         has_previous = False
8211                                         repo_name_prev = None
8212                                         slot_atom = "%s:%s" % (portage.dep_getkey(pkg_key),
8213                                                 metadata["SLOT"])
8214                                         slot_matches = vardb.match(slot_atom)
8215                                         if slot_matches:
8216                                                 has_previous = True
8217                                                 repo_name_prev = vardb.aux_get(slot_matches[0],
8218                                                         ["repository"])[0]
8219
8220                                         # now use the data to generate output
8221                                         if pkg.installed or not has_previous:
8222                                                 repoadd = repo_display.repoStr(repo_path_real)
8223                                         else:
8224                                                 repo_path_prev = None
8225                                                 if repo_name_prev:
8226                                                         repo_path_prev = portdb.getRepositoryPath(
8227                                                                 repo_name_prev)
8228                                                 if repo_path_prev == repo_path_real:
8229                                                         repoadd = repo_display.repoStr(repo_path_real)
8230                                                 else:
8231                                                         repoadd = "%s=>%s" % (
8232                                                                 repo_display.repoStr(repo_path_prev),
8233                                                                 repo_display.repoStr(repo_path_real))
8234                                         if repoadd:
8235                                                 repoadd_set.add(repoadd)
8236
8237                                 xs = [portage.cpv_getkey(pkg_key)] + \
8238                                         list(portage.catpkgsplit(pkg_key)[2:])
8239                                 if xs[2] == "r0":
8240                                         xs[2] = ""
8241                                 else:
8242                                         xs[2] = "-" + xs[2]
8243
8244                                 mywidth = 130
8245                                 if "COLUMNWIDTH" in self.settings:
8246                                         try:
8247                                                 mywidth = int(self.settings["COLUMNWIDTH"])
8248                                         except ValueError, e:
8249                                                 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
8250                                                 portage.writemsg(
8251                                                         "!!! Unable to parse COLUMNWIDTH='%s'\n" % \
8252                                                         self.settings["COLUMNWIDTH"], noiselevel=-1)
8253                                                 del e
8254                                 oldlp = mywidth - 30
8255                                 newlp = oldlp - 30
8256
8257                                 # Convert myoldbest from a list to a string.
8258                                 if not myoldbest:
8259                                         myoldbest = ""
8260                                 else:
8261                                         for pos, key in enumerate(myoldbest):
8262                                                 key = portage.catpkgsplit(key)[2] + \
8263                                                         "-" + portage.catpkgsplit(key)[3]
8264                                                 if key[-3:] == "-r0":
8265                                                         key = key[:-3]
8266                                                 myoldbest[pos] = key
8267                                         myoldbest = blue("["+", ".join(myoldbest)+"]")
8268
8269                                 pkg_cp = xs[0]
8270                                 root_config = self.roots[myroot]
8271                                 system_set = root_config.sets["system"]
8272                                 world_set  = root_config.sets["world"]
8273
8274                                 pkg_system = False
8275                                 pkg_world = False
8276                                 try:
8277                                         pkg_system = system_set.findAtomForPackage(pkg)
8278                                         pkg_world  = world_set.findAtomForPackage(pkg)
8279                                         if not (oneshot or pkg_world) and \
8280                                                 myroot == self.target_root and \
8281                                                 favorites_set.findAtomForPackage(pkg):
8282                                                 # Maybe it will be added to world now.
8283                                                 if create_world_atom(pkg, favorites_set, root_config):
8284                                                         pkg_world = True
8285                                 except portage.exception.InvalidDependString:
8286                                         # This is reported elsewhere if relevant.
8287                                         pass
8288
8289                                 def pkgprint(pkg_str):
8290                                         if pkg_merge:
8291                                                 if pkg_system:
8292                                                         return colorize("PKG_MERGE_SYSTEM", pkg_str)
8293                                                 elif pkg_world:
8294                                                         return colorize("PKG_MERGE_WORLD", pkg_str)
8295                                                 else:
8296                                                         return colorize("PKG_MERGE", pkg_str)
8297                                         elif pkg_status == "uninstall":
8298                                                 return colorize("PKG_UNINSTALL", pkg_str)
8299                                         else:
8300                                                 if pkg_system:
8301                                                         return colorize("PKG_NOMERGE_SYSTEM", pkg_str)
8302                                                 elif pkg_world:
8303                                                         return colorize("PKG_NOMERGE_WORLD", pkg_str)
8304                                                 else:
8305                                                         return colorize("PKG_NOMERGE", pkg_str)
8306
8307                                 try:
8308                                         properties = flatten(use_reduce(paren_reduce(
8309                                                 pkg.metadata["PROPERTIES"]), uselist=pkg.use.enabled))
8310                                 except portage.exception.InvalidDependString, e:
8311                                         if not pkg.installed:
8312                                                 show_invalid_depstring_notice(pkg,
8313                                                         pkg.metadata["PROPERTIES"], str(e))
8314                                                 del e
8315                                                 return 1
8316                                         properties = []
8317                                 interactive = "interactive" in properties
8318                                 if interactive and pkg.operation == "merge":
8319                                         addl = colorize("WARN", "I") + addl[1:]
8320                                         if ordered:
8321                                                 counters.interactive += 1
8322
8323                                 if x[1]!="/":
8324                                         if myoldbest:
8325                                                 myoldbest +=" "
8326                                         if "--columns" in self.myopts:
8327                                                 if "--quiet" in self.myopts:
8328                                                         myprint=addl+" "+indent+pkgprint(pkg_cp)
8329                                                         myprint=myprint+darkblue(" "+xs[1]+xs[2])+" "
8330                                                         myprint=myprint+myoldbest
8331                                                         myprint=myprint+darkgreen("to "+x[1])
8332                                                         verboseadd = None
8333                                                 else:
8334                                                         if not pkg_merge:
8335                                                                 myprint = "[%s] %s%s" % \
8336                                                                         (pkgprint(pkg_status.ljust(13)),
8337                                                                         indent, pkgprint(pkg.cp))
8338                                                         else:
8339                                                                 myprint = "[%s %s] %s%s" % \
8340                                                                         (pkgprint(pkg.type_name), addl,
8341                                                                         indent, pkgprint(pkg.cp))
8342                                                         if (newlp-nc_len(myprint)) > 0:
8343                                                                 myprint=myprint+(" "*(newlp-nc_len(myprint)))
8344                                                         myprint=myprint+"["+darkblue(xs[1]+xs[2])+"] "
8345                                                         if (oldlp-nc_len(myprint)) > 0:
8346                                                                 myprint=myprint+" "*(oldlp-nc_len(myprint))
8347                                                         myprint=myprint+myoldbest
8348                                                         myprint += darkgreen("to " + pkg.root)
8349                                         else:
8350                                                 if not pkg_merge:
8351                                                         myprint = "[%s] " % pkgprint(pkg_status.ljust(13))
8352                                                 else:
8353                                                         myprint = "[%s %s] " % (pkgprint(pkg_type), addl)
8354                                                 myprint += indent + pkgprint(pkg_key) + " " + \
8355                                                         myoldbest + darkgreen("to " + myroot)
8356                                 else:
8357                                         if "--columns" in self.myopts:
8358                                                 if "--quiet" in self.myopts:
8359                                                         myprint=addl+" "+indent+pkgprint(pkg_cp)
8360                                                         myprint=myprint+" "+green(xs[1]+xs[2])+" "
8361                                                         myprint=myprint+myoldbest
8362                                                         verboseadd = None
8363                                                 else:
8364                                                         if not pkg_merge:
8365                                                                 myprint = "[%s] %s%s" % \
8366                                                                         (pkgprint(pkg_status.ljust(13)),
8367                                                                         indent, pkgprint(pkg.cp))
8368                                                         else:
8369                                                                 myprint = "[%s %s] %s%s" % \
8370                                                                         (pkgprint(pkg.type_name), addl,
8371                                                                         indent, pkgprint(pkg.cp))
8372                                                         if (newlp-nc_len(myprint)) > 0:
8373                                                                 myprint=myprint+(" "*(newlp-nc_len(myprint)))
8374                                                         myprint=myprint+green(" ["+xs[1]+xs[2]+"] ")
8375                                                         if (oldlp-nc_len(myprint)) > 0:
8376                                                                 myprint=myprint+(" "*(oldlp-nc_len(myprint)))
8377                                                         myprint += myoldbest
8378                                         else:
8379                                                 if not pkg_merge:
8380                                                         myprint = "[%s] %s%s %s" % \
8381                                                                 (pkgprint(pkg_status.ljust(13)),
8382                                                                 indent, pkgprint(pkg.cpv),
8383                                                                 myoldbest)
8384                                                 else:
8385                                                         myprint = "[%s %s] %s%s %s" % \
8386                                                                 (pkgprint(pkg_type), addl, indent,
8387                                                                 pkgprint(pkg.cpv), myoldbest)
8388
8389                                 if columns and pkg.operation == "uninstall":
8390                                         continue
8391                                 p.append((myprint, verboseadd, repoadd))
8392
8393                                 if "--tree" not in self.myopts and \
8394                                         "--quiet" not in self.myopts and \
8395                                         not self._opts_no_restart.intersection(self.myopts) and \
8396                                         pkg.root == self._running_root.root and \
8397                                         portage.match_from_list(
8398                                         portage.const.PORTAGE_PACKAGE_ATOM, [pkg]) and \
8399                                         not vardb.cpv_exists(pkg.cpv) and \
8400                                         "--quiet" not in self.myopts:
8401                                                 if mylist_index < len(mylist) - 1:
8402                                                         p.append(colorize("WARN", "*** Portage will stop merging at this point and reload itself,"))
8403                                                         p.append(colorize("WARN", "    then resume the merge."))
8404
8405                 out = sys.stdout
8406                 show_repos = repoadd_set and repoadd_set != set(["0"])
8407
8408                 for x in p:
8409                         if isinstance(x, basestring):
8410                                 out.write("%s\n" % (x,))
8411                                 continue
8412
8413                         myprint, verboseadd, repoadd = x
8414
8415                         if verboseadd:
8416                                 myprint += " " + verboseadd
8417
8418                         if show_repos and repoadd:
8419                                 myprint += " " + teal("[%s]" % repoadd)
8420
8421                         out.write("%s\n" % (myprint,))
8422
8423                 for x in blockers:
8424                         print x
8425
8426                 if verbosity == 3:
8427                         print
8428                         print counters
8429                         if show_repos:
8430                                 sys.stdout.write(str(repo_display))
8431
8432                 if "--changelog" in self.myopts:
8433                         print
8434                         for revision,text in changelogs:
8435                                 print bold('*'+revision)
8436                                 sys.stdout.write(text)
8437
8438                 sys.stdout.flush()
8439                 return os.EX_OK
8440
8441         def display_problems(self):
8442                 """
8443                 Display problems with the dependency graph such as slot collisions.
8444                 This is called internally by display() to show the problems _after_
8445                 the merge list where it is most likely to be seen, but if display()
8446                 is not going to be called then this method should be called explicitly
8447                 to ensure that the user is notified of problems with the graph.
8448
8449                 All output goes to stderr, except for unsatisfied dependencies which
8450                 go to stdout for parsing by programs such as autounmask.
8451                 """
8452
8453                 # Note that show_masked_packages() sends it's output to
8454                 # stdout, and some programs such as autounmask parse the
8455                 # output in cases when emerge bails out. However, when
8456                 # show_masked_packages() is called for installed packages
8457                 # here, the message is a warning that is more appropriate
8458                 # to send to stderr, so temporarily redirect stdout to
8459                 # stderr. TODO: Fix output code so there's a cleaner way
8460                 # to redirect everything to stderr.
8461                 sys.stdout.flush()
8462                 sys.stderr.flush()
8463                 stdout = sys.stdout
8464                 try:
8465                         sys.stdout = sys.stderr
8466                         self._display_problems()
8467                 finally:
8468                         sys.stdout = stdout
8469                         sys.stdout.flush()
8470                         sys.stderr.flush()
8471
8472                 # This goes to stdout for parsing by programs like autounmask.
8473                 for pargs, kwargs in self._unsatisfied_deps_for_display:
8474                         self._show_unsatisfied_dep(*pargs, **kwargs)
8475
8476         def _display_problems(self):
8477                 if self._circular_deps_for_display is not None:
8478                         self._show_circular_deps(
8479                                 self._circular_deps_for_display)
8480
8481                 # The user is only notified of a slot conflict if
8482                 # there are no unresolvable blocker conflicts.
8483                 if self._unsatisfied_blockers_for_display is not None:
8484                         self._show_unsatisfied_blockers(
8485                                 self._unsatisfied_blockers_for_display)
8486                 else:
8487                         self._show_slot_collision_notice()
8488
8489                 # TODO: Add generic support for "set problem" handlers so that
8490                 # the below warnings aren't special cases for world only.
8491
8492                 if self._missing_args:
8493                         world_problems = False
8494                         if "world" in self._sets:
8495                                 # Filter out indirect members of world (from nested sets)
8496                                 # since only direct members of world are desired here.
8497                                 world_set = self.roots[self.target_root].sets["world"]
8498                                 for arg, atom in self._missing_args:
8499                                         if arg.name == "world" and atom in world_set:
8500                                                 world_problems = True
8501                                                 break
8502
8503                         if world_problems:
8504                                 sys.stderr.write("\n!!! Problems have been " + \
8505                                         "detected with your world file\n")
8506                                 sys.stderr.write("!!! Please run " + \
8507                                         green("emaint --check world")+"\n\n")
8508
8509                 if self._missing_args:
8510                         sys.stderr.write("\n" + colorize("BAD", "!!!") + \
8511                                 " Ebuilds for the following packages are either all\n")
8512                         sys.stderr.write(colorize("BAD", "!!!") + \
8513                                 " masked or don't exist:\n")
8514                         sys.stderr.write(" ".join(str(atom) for arg, atom in \
8515                                 self._missing_args) + "\n")
8516
8517                 if self._pprovided_args:
8518                         arg_refs = {}
8519                         for arg, atom in self._pprovided_args:
8520                                 if isinstance(arg, SetArg):
8521                                         parent = arg.name
8522                                         arg_atom = (atom, atom)
8523                                 else:
8524                                         parent = "args"
8525                                         arg_atom = (arg.arg, atom)
8526                                 refs = arg_refs.setdefault(arg_atom, [])
8527                                 if parent not in refs:
8528                                         refs.append(parent)
8529                         msg = []
8530                         msg.append(bad("\nWARNING: "))
8531                         if len(self._pprovided_args) > 1:
8532                                 msg.append("Requested packages will not be " + \
8533                                         "merged because they are listed in\n")
8534                         else:
8535                                 msg.append("A requested package will not be " + \
8536                                         "merged because it is listed in\n")
8537                         msg.append("package.provided:\n\n")
8538                         problems_sets = set()
8539                         for (arg, atom), refs in arg_refs.iteritems():
8540                                 ref_string = ""
8541                                 if refs:
8542                                         problems_sets.update(refs)
8543                                         refs.sort()
8544                                         ref_string = ", ".join(["'%s'" % name for name in refs])
8545                                         ref_string = " pulled in by " + ref_string
8546                                 msg.append("  %s%s\n" % (colorize("INFORM", str(arg)), ref_string))
8547                         msg.append("\n")
8548                         if "world" in problems_sets:
8549                                 msg.append("This problem can be solved in one of the following ways:\n\n")
8550                                 msg.append("  A) Use emaint to clean offending packages from world (if not installed).\n")
8551                                 msg.append("  B) Uninstall offending packages (cleans them from world).\n")
8552                                 msg.append("  C) Remove offending entries from package.provided.\n\n")
8553                                 msg.append("The best course of action depends on the reason that an offending\n")
8554                                 msg.append("package.provided entry exists.\n\n")
8555                         sys.stderr.write("".join(msg))
8556
8557                 masked_packages = []
8558                 for pkg in self._masked_installed:
8559                         root_config = pkg.root_config
8560                         pkgsettings = self.pkgsettings[pkg.root]
8561                         mreasons = get_masking_status(pkg, pkgsettings, root_config)
8562                         masked_packages.append((root_config, pkgsettings,
8563                                 pkg.cpv, pkg.metadata, mreasons))
8564                 if masked_packages:
8565                         sys.stderr.write("\n" + colorize("BAD", "!!!") + \
8566                                 " The following installed packages are masked:\n")
8567                         show_masked_packages(masked_packages)
8568                         show_mask_docs()
8569                         print
8570
8571         def calc_changelog(self,ebuildpath,current,next):
8572                 if ebuildpath == None or not os.path.exists(ebuildpath):
8573                         return []
8574                 current = '-'.join(portage.catpkgsplit(current)[1:])
8575                 if current.endswith('-r0'):
8576                         current = current[:-3]
8577                 next = '-'.join(portage.catpkgsplit(next)[1:])
8578                 if next.endswith('-r0'):
8579                         next = next[:-3]
8580                 changelogpath = os.path.join(os.path.split(ebuildpath)[0],'ChangeLog')
8581                 try:
8582                         changelog = open(changelogpath).read()
8583                 except SystemExit, e:
8584                         raise # Needed else can't exit
8585                 except:
8586                         return []
8587                 divisions = self.find_changelog_tags(changelog)
8588                 #print 'XX from',current,'to',next
8589                 #for div,text in divisions: print 'XX',div
8590                 # skip entries for all revisions above the one we are about to emerge
8591                 for i in range(len(divisions)):
8592                         if divisions[i][0]==next:
8593                                 divisions = divisions[i:]
8594                                 break
8595                 # find out how many entries we are going to display
8596                 for i in range(len(divisions)):
8597                         if divisions[i][0]==current:
8598                                 divisions = divisions[:i]
8599                                 break
8600                 else:
8601                     # couldnt find the current revision in the list. display nothing
8602                         return []
8603                 return divisions
8604
8605         def find_changelog_tags(self,changelog):
8606                 divs = []
8607                 release = None
8608                 while 1:
8609                         match = re.search(r'^\*\ ?([-a-zA-Z0-9_.+]*)(?:\ .*)?\n',changelog,re.M)
8610                         if match is None:
8611                                 if release is not None:
8612                                         divs.append((release,changelog))
8613                                 return divs
8614                         if release is not None:
8615                                 divs.append((release,changelog[:match.start()]))
8616                         changelog = changelog[match.end():]
8617                         release = match.group(1)
8618                         if release.endswith('.ebuild'):
8619                                 release = release[:-7]
8620                         if release.endswith('-r0'):
8621                                 release = release[:-3]
8622
8623         def saveNomergeFavorites(self):
8624                 """Find atoms in favorites that are not in the mergelist and add them
8625                 to the world file if necessary."""
8626                 for x in ("--buildpkgonly", "--fetchonly", "--fetch-all-uri",
8627                         "--oneshot", "--onlydeps", "--pretend"):
8628                         if x in self.myopts:
8629                                 return
8630                 root_config = self.roots[self.target_root]
8631                 world_set = root_config.sets["world"]
8632
8633                 world_locked = False
8634                 if hasattr(world_set, "lock"):
8635                         world_set.lock()
8636                         world_locked = True
8637
8638                 if hasattr(world_set, "load"):
8639                         world_set.load() # maybe it's changed on disk
8640
8641                 args_set = self._sets["args"]
8642                 portdb = self.trees[self.target_root]["porttree"].dbapi
8643                 added_favorites = set()
8644                 for x in self._set_nodes:
8645                         pkg_type, root, pkg_key, pkg_status = x
8646                         if pkg_status != "nomerge":
8647                                 continue
8648
8649                         try:
8650                                 myfavkey = create_world_atom(x, args_set, root_config)
8651                                 if myfavkey:
8652                                         if myfavkey in added_favorites:
8653                                                 continue
8654                                         added_favorites.add(myfavkey)
8655                         except portage.exception.InvalidDependString, e:
8656                                 writemsg("\n\n!!! '%s' has invalid PROVIDE: %s\n" % \
8657                                         (pkg_key, str(e)), noiselevel=-1)
8658                                 writemsg("!!! see '%s'\n\n" % os.path.join(
8659                                         root, portage.VDB_PATH, pkg_key, "PROVIDE"), noiselevel=-1)
8660                                 del e
8661                 all_added = []
8662                 for k in self._sets:
8663                         if k in ("args", "world") or not root_config.sets[k].world_candidate:
8664                                 continue
8665                         s = SETPREFIX + k
8666                         if s in world_set:
8667                                 continue
8668                         all_added.append(SETPREFIX + k)
8669                 all_added.extend(added_favorites)
8670                 all_added.sort()
8671                 for a in all_added:
8672                         print ">>> Recording %s in \"world\" favorites file..." % \
8673                                 colorize("INFORM", str(a))
8674                 if all_added:
8675                         world_set.update(all_added)
8676
8677                 if world_locked:
8678                         world_set.unlock()
8679
8680         def loadResumeCommand(self, resume_data, skip_masked=False):
8681                 """
8682                 Add a resume command to the graph and validate it in the process.  This
8683                 will raise a PackageNotFound exception if a package is not available.
8684                 """
8685
8686                 if not isinstance(resume_data, dict):
8687                         return False
8688
8689                 mergelist = resume_data.get("mergelist")
8690                 if not isinstance(mergelist, list):
8691                         mergelist = []
8692
8693                 fakedb = self.mydbapi
8694                 trees = self.trees
8695                 serialized_tasks = []
8696                 masked_tasks = []
8697                 for x in mergelist:
8698                         if not (isinstance(x, list) and len(x) == 4):
8699                                 continue
8700                         pkg_type, myroot, pkg_key, action = x
8701                         if pkg_type not in self.pkg_tree_map:
8702                                 continue
8703                         if action != "merge":
8704                                 continue
8705                         tree_type = self.pkg_tree_map[pkg_type]
8706                         mydb = trees[myroot][tree_type].dbapi
8707                         db_keys = list(self._trees_orig[myroot][
8708                                 tree_type].dbapi._aux_cache_keys)
8709                         try:
8710                                 metadata = izip(db_keys, mydb.aux_get(pkg_key, db_keys))
8711                         except KeyError:
8712                                 # It does no exist or it is corrupt.
8713                                 if action == "uninstall":
8714                                         continue
8715                                 raise portage.exception.PackageNotFound(pkg_key)
8716                         installed = action == "uninstall"
8717                         built = pkg_type != "ebuild"
8718                         root_config = self.roots[myroot]
8719                         pkg = Package(built=built, cpv=pkg_key,
8720                                 installed=installed, metadata=metadata,
8721                                 operation=action, root_config=root_config,
8722                                 type_name=pkg_type)
8723                         if pkg_type == "ebuild":
8724                                 pkgsettings = self.pkgsettings[myroot]
8725                                 pkgsettings.setcpv(pkg)
8726                                 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
8727                                 pkg.metadata['CHOST'] = pkgsettings.get('CHOST', '')
8728                         self._pkg_cache[pkg] = pkg
8729
8730                         root_config = self.roots[pkg.root]
8731                         if "merge" == pkg.operation and \
8732                                 not visible(root_config.settings, pkg):
8733                                 if skip_masked:
8734                                         masked_tasks.append(Dependency(root=pkg.root, parent=pkg))
8735                                 else:
8736                                         self._unsatisfied_deps_for_display.append(
8737                                                 ((pkg.root, "="+pkg.cpv), {"myparent":None}))
8738
8739                         fakedb[myroot].cpv_inject(pkg)
8740                         serialized_tasks.append(pkg)
8741                         self.spinner.update()
8742
8743                 if self._unsatisfied_deps_for_display:
8744                         return False
8745
8746                 if not serialized_tasks or "--nodeps" in self.myopts:
8747                         self._serialized_tasks_cache = serialized_tasks
8748                         self._scheduler_graph = self.digraph
8749                 else:
8750                         self._select_package = self._select_pkg_from_graph
8751                         self.myparams.add("selective")
8752                         # Always traverse deep dependencies in order to account for
8753                         # potentially unsatisfied dependencies of installed packages.
8754                         # This is necessary for correct --keep-going or --resume operation
8755                         # in case a package from a group of circularly dependent packages
8756                         # fails. In this case, a package which has recently been installed
8757                         # may have an unsatisfied circular dependency (pulled in by
8758                         # PDEPEND, for example). So, even though a package is already
8759                         # installed, it may not have all of it's dependencies satisfied, so
8760                         # it may not be usable. If such a package is in the subgraph of
8761                         # deep depenedencies of a scheduled build, that build needs to
8762                         # be cancelled. In order for this type of situation to be
8763                         # recognized, deep traversal of dependencies is required.
8764                         self.myparams.add("deep")
8765
8766                         favorites = resume_data.get("favorites")
8767                         args_set = self._sets["args"]
8768                         if isinstance(favorites, list):
8769                                 args = self._load_favorites(favorites)
8770                         else:
8771                                 args = []
8772
8773                         for task in serialized_tasks:
8774                                 if isinstance(task, Package) and \
8775                                         task.operation == "merge":
8776                                         if not self._add_pkg(task, None):
8777                                                 return False
8778
8779                         # Packages for argument atoms need to be explicitly
8780                         # added via _add_pkg() so that they are included in the
8781                         # digraph (needed at least for --tree display).
8782                         for arg in args:
8783                                 for atom in arg.set:
8784                                         pkg, existing_node = self._select_package(
8785                                                 arg.root_config.root, atom)
8786                                         if existing_node is None and \
8787                                                 pkg is not None:
8788                                                 if not self._add_pkg(pkg, Dependency(atom=atom,
8789                                                         root=pkg.root, parent=arg)):
8790                                                         return False
8791
8792                         # Allow unsatisfied deps here to avoid showing a masking
8793                         # message for an unsatisfied dep that isn't necessarily
8794                         # masked.
8795                         if not self._create_graph(allow_unsatisfied=True):
8796                                 return False
8797
8798                         unsatisfied_deps = []
8799                         for dep in self._unsatisfied_deps:
8800                                 if not isinstance(dep.parent, Package):
8801                                         continue
8802                                 if dep.parent.operation == "merge":
8803                                         unsatisfied_deps.append(dep)
8804                                         continue
8805
8806                                 # For unsatisfied deps of installed packages, only account for
8807                                 # them if they are in the subgraph of dependencies of a package
8808                                 # which is scheduled to be installed.
8809                                 unsatisfied_install = False
8810                                 traversed = set()
8811                                 dep_stack = self.digraph.parent_nodes(dep.parent)
8812                                 while dep_stack:
8813                                         node = dep_stack.pop()
8814                                         if not isinstance(node, Package):
8815                                                 continue
8816                                         if node.operation == "merge":
8817                                                 unsatisfied_install = True
8818                                                 break
8819                                         if node in traversed:
8820                                                 continue
8821                                         traversed.add(node)
8822                                         dep_stack.extend(self.digraph.parent_nodes(node))
8823
8824                                 if unsatisfied_install:
8825                                         unsatisfied_deps.append(dep)
8826
8827                         if masked_tasks or unsatisfied_deps:
8828                                 # This probably means that a required package
8829                                 # was dropped via --skipfirst. It makes the
8830                                 # resume list invalid, so convert it to a
8831                                 # UnsatisfiedResumeDep exception.
8832                                 raise self.UnsatisfiedResumeDep(self,
8833                                         masked_tasks + unsatisfied_deps)
8834                         self._serialized_tasks_cache = None
8835                         try:
8836                                 self.altlist()
8837                         except self._unknown_internal_error:
8838                                 return False
8839
8840                 return True
8841
8842         def _load_favorites(self, favorites):
8843                 """
8844                 Use a list of favorites to resume state from a
8845                 previous select_files() call. This creates similar
8846                 DependencyArg instances to those that would have
8847                 been created by the original select_files() call.
8848                 This allows Package instances to be matched with
8849                 DependencyArg instances during graph creation.
8850                 """
8851                 root_config = self.roots[self.target_root]
8852                 getSetAtoms = root_config.setconfig.getSetAtoms
8853                 sets = root_config.sets
8854                 args = []
8855                 for x in favorites:
8856                         if not isinstance(x, basestring):
8857                                 continue
8858                         if x in ("system", "world"):
8859                                 x = SETPREFIX + x
8860                         if x.startswith(SETPREFIX):
8861                                 s = x[len(SETPREFIX):]
8862                                 if s not in sets:
8863                                         continue
8864                                 if s in self._sets:
8865                                         continue
8866                                 # Recursively expand sets so that containment tests in
8867                                 # self._get_parent_sets() properly match atoms in nested
8868                                 # sets (like if world contains system).
8869                                 expanded_set = InternalPackageSet(
8870                                         initial_atoms=getSetAtoms(s))
8871                                 self._sets[s] = expanded_set
8872                                 args.append(SetArg(arg=x, set=expanded_set,
8873                                         root_config=root_config))
8874                         else:
8875                                 if not portage.isvalidatom(x):
8876                                         continue
8877                                 args.append(AtomArg(arg=x, atom=x,
8878                                         root_config=root_config))
8879
8880                 self._set_args(args)
8881                 return args
8882
8883         class UnsatisfiedResumeDep(portage.exception.PortageException):
8884                 """
8885                 A dependency of a resume list is not installed. This
8886                 can occur when a required package is dropped from the
8887                 merge list via --skipfirst.
8888                 """
8889                 def __init__(self, depgraph, value):
8890                         portage.exception.PortageException.__init__(self, value)
8891                         self.depgraph = depgraph
8892
8893         class _internal_exception(portage.exception.PortageException):
8894                 def __init__(self, value=""):
8895                         portage.exception.PortageException.__init__(self, value)
8896
8897         class _unknown_internal_error(_internal_exception):
8898                 """
8899                 Used by the depgraph internally to terminate graph creation.
8900                 The specific reason for the failure should have been dumped
8901                 to stderr, unfortunately, the exact reason for the failure
8902                 may not be known.
8903                 """
8904
8905         class _serialize_tasks_retry(_internal_exception):
8906                 """
8907                 This is raised by the _serialize_tasks() method when it needs to
8908                 be called again for some reason. The only case that it's currently
8909                 used for is when neglected dependencies need to be added to the
8910                 graph in order to avoid making a potentially unsafe decision.
8911                 """
8912
8913         class _dep_check_composite_db(portage.dbapi):
8914                 """
8915                 A dbapi-like interface that is optimized for use in dep_check() calls.
8916                 This is built on top of the existing depgraph package selection logic.
8917                 Some packages that have been added to the graph may be masked from this
8918                 view in order to influence the atom preference selection that occurs
8919                 via dep_check().
8920                 """
8921                 def __init__(self, depgraph, root):
8922                         portage.dbapi.__init__(self)
8923                         self._depgraph = depgraph
8924                         self._root = root
8925                         self._match_cache = {}
8926                         self._cpv_pkg_map = {}
8927
8928                 def _clear_cache(self):
8929                         self._match_cache.clear()
8930                         self._cpv_pkg_map.clear()
8931
8932                 def match(self, atom):
8933                         ret = self._match_cache.get(atom)
8934                         if ret is not None:
8935                                 return ret[:]
8936                         orig_atom = atom
8937                         if "/" not in atom:
8938                                 atom = self._dep_expand(atom)
8939                         pkg, existing = self._depgraph._select_package(self._root, atom)
8940                         if not pkg:
8941                                 ret = []
8942                         else:
8943                                 # Return the highest available from select_package() as well as
8944                                 # any matching slots in the graph db.
8945                                 slots = set()
8946                                 slots.add(pkg.metadata["SLOT"])
8947                                 atom_cp = portage.dep_getkey(atom)
8948                                 if pkg.cp.startswith("virtual/"):
8949                                         # For new-style virtual lookahead that occurs inside
8950                                         # dep_check(), examine all slots. This is needed
8951                                         # so that newer slots will not unnecessarily be pulled in
8952                                         # when a satisfying lower slot is already installed. For
8953                                         # example, if virtual/jdk-1.4 is satisfied via kaffe then
8954                                         # there's no need to pull in a newer slot to satisfy a
8955                                         # virtual/jdk dependency.
8956                                         for db, pkg_type, built, installed, db_keys in \
8957                                                 self._depgraph._filtered_trees[self._root]["dbs"]:
8958                                                 for cpv in db.match(atom):
8959                                                         if portage.cpv_getkey(cpv) != pkg.cp:
8960                                                                 continue
8961                                                         slots.add(db.aux_get(cpv, ["SLOT"])[0])
8962                                 ret = []
8963                                 if self._visible(pkg):
8964                                         self._cpv_pkg_map[pkg.cpv] = pkg
8965                                         ret.append(pkg.cpv)
8966                                 slots.remove(pkg.metadata["SLOT"])
8967                                 while slots:
8968                                         slot_atom = "%s:%s" % (atom_cp, slots.pop())
8969                                         pkg, existing = self._depgraph._select_package(
8970                                                 self._root, slot_atom)
8971                                         if not pkg:
8972                                                 continue
8973                                         if not self._visible(pkg):
8974                                                 continue
8975                                         self._cpv_pkg_map[pkg.cpv] = pkg
8976                                         ret.append(pkg.cpv)
8977                                 if ret:
8978                                         self._cpv_sort_ascending(ret)
8979                         self._match_cache[orig_atom] = ret
8980                         return ret[:]
8981
8982                 def _visible(self, pkg):
8983                         if pkg.installed and "selective" not in self._depgraph.myparams:
8984                                 try:
8985                                         arg = self._depgraph._iter_atoms_for_pkg(pkg).next()
8986                                 except (StopIteration, portage.exception.InvalidDependString):
8987                                         arg = None
8988                                 if arg:
8989                                         return False
8990                         if pkg.installed:
8991                                 try:
8992                                         if not visible(
8993                                                 self._depgraph.pkgsettings[pkg.root], pkg):
8994                                                 return False
8995                                 except portage.exception.InvalidDependString:
8996                                         pass
8997                         in_graph = self._depgraph._slot_pkg_map[
8998                                 self._root].get(pkg.slot_atom)
8999                         if in_graph is None:
9000                                 # Mask choices for packages which are not the highest visible
9001                                 # version within their slot (since they usually trigger slot
9002                                 # conflicts).
9003                                 highest_visible, in_graph = self._depgraph._select_package(
9004                                         self._root, pkg.slot_atom)
9005                                 if pkg != highest_visible:
9006                                         return False
9007                         elif in_graph != pkg:
9008                                 # Mask choices for packages that would trigger a slot
9009                                 # conflict with a previously selected package.
9010                                 return False
9011                         return True
9012
9013                 def _dep_expand(self, atom):
9014                         """
9015                         This is only needed for old installed packages that may
9016                         contain atoms that are not fully qualified with a specific
9017                         category. Emulate the cpv_expand() function that's used by
9018                         dbapi.match() in cases like this. If there are multiple
9019                         matches, it's often due to a new-style virtual that has
9020                         been added, so try to filter those out to avoid raising
9021                         a ValueError.
9022                         """
9023                         root_config = self._depgraph.roots[self._root]
9024                         orig_atom = atom
9025                         expanded_atoms = self._depgraph._dep_expand(root_config, atom)
9026                         if len(expanded_atoms) > 1:
9027                                 non_virtual_atoms = []
9028                                 for x in expanded_atoms:
9029                                         if not portage.dep_getkey(x).startswith("virtual/"):
9030                                                 non_virtual_atoms.append(x)
9031                                 if len(non_virtual_atoms) == 1:
9032                                         expanded_atoms = non_virtual_atoms
9033                         if len(expanded_atoms) > 1:
9034                                 # compatible with portage.cpv_expand()
9035                                 raise portage.exception.AmbiguousPackageName(
9036                                         [portage.dep_getkey(x) for x in expanded_atoms])
9037                         if expanded_atoms:
9038                                 atom = expanded_atoms[0]
9039                         else:
9040                                 null_atom = insert_category_into_atom(atom, "null")
9041                                 null_cp = portage.dep_getkey(null_atom)
9042                                 cat, atom_pn = portage.catsplit(null_cp)
9043                                 virts_p = root_config.settings.get_virts_p().get(atom_pn)
9044                                 if virts_p:
9045                                         # Allow the resolver to choose which virtual.
9046                                         atom = insert_category_into_atom(atom, "virtual")
9047                                 else:
9048                                         atom = insert_category_into_atom(atom, "null")
9049                         return atom
9050
9051                 def aux_get(self, cpv, wants):
9052                         metadata = self._cpv_pkg_map[cpv].metadata
9053                         return [metadata.get(x, "") for x in wants]
9054
9055 class RepoDisplay(object):
9056         def __init__(self, roots):
9057                 self._shown_repos = {}
9058                 self._unknown_repo = False
9059                 repo_paths = set()
9060                 for root_config in roots.itervalues():
9061                         portdir = root_config.settings.get("PORTDIR")
9062                         if portdir:
9063                                 repo_paths.add(portdir)
9064                         overlays = root_config.settings.get("PORTDIR_OVERLAY")
9065                         if overlays:
9066                                 repo_paths.update(overlays.split())
9067                 repo_paths = list(repo_paths)
9068                 self._repo_paths = repo_paths
9069                 self._repo_paths_real = [ os.path.realpath(repo_path) \
9070                         for repo_path in repo_paths ]
9071
9072                 # pre-allocate index for PORTDIR so that it always has index 0.
9073                 for root_config in roots.itervalues():
9074                         portdb = root_config.trees["porttree"].dbapi
9075                         portdir = portdb.porttree_root
9076                         if portdir:
9077                                 self.repoStr(portdir)
9078
9079         def repoStr(self, repo_path_real):
9080                 real_index = -1
9081                 if repo_path_real:
9082                         real_index = self._repo_paths_real.index(repo_path_real)
9083                 if real_index == -1:
9084                         s = "?"
9085                         self._unknown_repo = True
9086                 else:
9087                         shown_repos = self._shown_repos
9088                         repo_paths = self._repo_paths
9089                         repo_path = repo_paths[real_index]
9090                         index = shown_repos.get(repo_path)
9091                         if index is None:
9092                                 index = len(shown_repos)
9093                                 shown_repos[repo_path] = index
9094                         s = str(index)
9095                 return s
9096
9097         def __str__(self):
9098                 output = []
9099                 shown_repos = self._shown_repos
9100                 unknown_repo = self._unknown_repo
9101                 if shown_repos or self._unknown_repo:
9102                         output.append("Portage tree and overlays:\n")
9103                 show_repo_paths = list(shown_repos)
9104                 for repo_path, repo_index in shown_repos.iteritems():
9105                         show_repo_paths[repo_index] = repo_path
9106                 if show_repo_paths:
9107                         for index, repo_path in enumerate(show_repo_paths):
9108                                 output.append(" "+teal("["+str(index)+"]")+" %s\n" % repo_path)
9109                 if unknown_repo:
9110                         output.append(" "+teal("[?]") + \
9111                                 " indicates that the source repository could not be determined\n")
9112                 return "".join(output)
9113
9114 class PackageCounters(object):
9115
9116         def __init__(self):
9117                 self.upgrades   = 0
9118                 self.downgrades = 0
9119                 self.new        = 0
9120                 self.newslot    = 0
9121                 self.reinst     = 0
9122                 self.uninst     = 0
9123                 self.blocks     = 0
9124                 self.blocks_satisfied         = 0
9125                 self.totalsize  = 0
9126                 self.restrict_fetch           = 0
9127                 self.restrict_fetch_satisfied = 0
9128                 self.interactive              = 0
9129
9130         def __str__(self):
9131                 total_installs = self.upgrades + self.downgrades + self.newslot + self.new + self.reinst
9132                 myoutput = []
9133                 details = []
9134                 myoutput.append("Total: %s package" % total_installs)
9135                 if total_installs != 1:
9136                         myoutput.append("s")
9137                 if total_installs != 0:
9138                         myoutput.append(" (")
9139                 if self.upgrades > 0:
9140                         details.append("%s upgrade" % self.upgrades)
9141                         if self.upgrades > 1:
9142                                 details[-1] += "s"
9143                 if self.downgrades > 0:
9144                         details.append("%s downgrade" % self.downgrades)
9145                         if self.downgrades > 1:
9146                                 details[-1] += "s"
9147                 if self.new > 0:
9148                         details.append("%s new" % self.new)
9149                 if self.newslot > 0:
9150                         details.append("%s in new slot" % self.newslot)
9151                         if self.newslot > 1:
9152                                 details[-1] += "s"
9153                 if self.reinst > 0:
9154                         details.append("%s reinstall" % self.reinst)
9155                         if self.reinst > 1:
9156                                 details[-1] += "s"
9157                 if self.uninst > 0:
9158                         details.append("%s uninstall" % self.uninst)
9159                         if self.uninst > 1:
9160                                 details[-1] += "s"
9161                 if self.interactive > 0:
9162                         details.append("%s %s" % (self.interactive,
9163                                 colorize("WARN", "interactive")))
9164                 myoutput.append(", ".join(details))
9165                 if total_installs != 0:
9166                         myoutput.append(")")
9167                 myoutput.append(", Size of downloads: %s" % format_size(self.totalsize))
9168                 if self.restrict_fetch:
9169                         myoutput.append("\nFetch Restriction: %s package" % \
9170                                 self.restrict_fetch)
9171                         if self.restrict_fetch > 1:
9172                                 myoutput.append("s")
9173                 if self.restrict_fetch_satisfied < self.restrict_fetch:
9174                         myoutput.append(bad(" (%s unsatisfied)") % \
9175                                 (self.restrict_fetch - self.restrict_fetch_satisfied))
9176                 if self.blocks > 0:
9177                         myoutput.append("\nConflict: %s block" % \
9178                                 self.blocks)
9179                         if self.blocks > 1:
9180                                 myoutput.append("s")
9181                         if self.blocks_satisfied < self.blocks:
9182                                 myoutput.append(bad(" (%s unsatisfied)") % \
9183                                         (self.blocks - self.blocks_satisfied))
9184                 return "".join(myoutput)
9185
9186 class PollSelectAdapter(PollConstants):
9187
9188         """
9189         Use select to emulate a poll object, for
9190         systems that don't support poll().
9191         """
9192
9193         def __init__(self):
9194                 self._registered = {}
9195                 self._select_args = [[], [], []]
9196
9197         def register(self, fd, *args):
9198                 """
9199                 Only POLLIN is currently supported!
9200                 """
9201                 if len(args) > 1:
9202                         raise TypeError(
9203                                 "register expected at most 2 arguments, got " + \
9204                                 repr(1 + len(args)))
9205
9206                 eventmask = PollConstants.POLLIN | \
9207                         PollConstants.POLLPRI | PollConstants.POLLOUT
9208                 if args:
9209                         eventmask = args[0]
9210
9211                 self._registered[fd] = eventmask
9212                 self._select_args = None
9213
9214         def unregister(self, fd):
9215                 self._select_args = None
9216                 del self._registered[fd]
9217
9218         def poll(self, *args):
9219                 if len(args) > 1:
9220                         raise TypeError(
9221                                 "poll expected at most 2 arguments, got " + \
9222                                 repr(1 + len(args)))
9223
9224                 timeout = None
9225                 if args:
9226                         timeout = args[0]
9227
9228                 select_args = self._select_args
9229                 if select_args is None:
9230                         select_args = [self._registered.keys(), [], []]
9231
9232                 if timeout is not None:
9233                         select_args = select_args[:]
9234                         # Translate poll() timeout args to select() timeout args:
9235                         #
9236                         #          | units        | value(s) for indefinite block
9237                         # ---------|--------------|------------------------------
9238                         #   poll   | milliseconds | omitted, negative, or None
9239                         # ---------|--------------|------------------------------
9240                         #   select | seconds      | omitted
9241                         # ---------|--------------|------------------------------
9242
9243                         if timeout is not None and timeout < 0:
9244                                 timeout = None
9245                         if timeout is not None:
9246                                 select_args.append(timeout / 1000)
9247
9248                 select_events = select.select(*select_args)
9249                 poll_events = []
9250                 for fd in select_events[0]:
9251                         poll_events.append((fd, PollConstants.POLLIN))
9252                 return poll_events
9253
9254 class SequentialTaskQueue(SlotObject):
9255
9256         __slots__ = ("max_jobs", "running_tasks") + \
9257                 ("_dirty", "_scheduling", "_task_queue")
9258
9259         def __init__(self, **kwargs):
9260                 SlotObject.__init__(self, **kwargs)
9261                 self._task_queue = deque()
9262                 self.running_tasks = set()
9263                 if self.max_jobs is None:
9264                         self.max_jobs = 1
9265                 self._dirty = True
9266
9267         def add(self, task):
9268                 self._task_queue.append(task)
9269                 self._dirty = True
9270
9271         def addFront(self, task):
9272                 self._task_queue.appendleft(task)
9273                 self._dirty = True
9274
9275         def schedule(self):
9276
9277                 if not self._dirty:
9278                         return False
9279
9280                 if not self:
9281                         return False
9282
9283                 if self._scheduling:
9284                         # Ignore any recursive schedule() calls triggered via
9285                         # self._task_exit().
9286                         return False
9287
9288                 self._scheduling = True
9289
9290                 task_queue = self._task_queue
9291                 running_tasks = self.running_tasks
9292                 max_jobs = self.max_jobs
9293                 state_changed = False
9294
9295                 while task_queue and \
9296                         (max_jobs is True or len(running_tasks) < max_jobs):
9297                         task = task_queue.popleft()
9298                         cancelled = getattr(task, "cancelled", None)
9299                         if not cancelled:
9300                                 running_tasks.add(task)
9301                                 task.addExitListener(self._task_exit)
9302                                 task.start()
9303                         state_changed = True
9304
9305                 self._dirty = False
9306                 self._scheduling = False
9307
9308                 return state_changed
9309
9310         def _task_exit(self, task):
9311                 """
9312                 Since we can always rely on exit listeners being called, the set of
9313                 running tasks is always pruned automatically and there is never any need
9314                 to actively prune it.
9315                 """
9316                 self.running_tasks.remove(task)
9317                 if self._task_queue:
9318                         self._dirty = True
9319
9320         def clear(self):
9321                 self._task_queue.clear()
9322                 running_tasks = self.running_tasks
9323                 while running_tasks:
9324                         task = running_tasks.pop()
9325                         task.removeExitListener(self._task_exit)
9326                         task.cancel()
9327                 self._dirty = False
9328
9329         def __nonzero__(self):
9330                 return bool(self._task_queue or self.running_tasks)
9331
9332         def __len__(self):
9333                 return len(self._task_queue) + len(self.running_tasks)
9334
9335 _can_poll_device = None
9336
9337 def can_poll_device():
9338         """
9339         Test if it's possible to use poll() on a device such as a pty. This
9340         is known to fail on Darwin.
9341         @rtype: bool
9342         @returns: True if poll() on a device succeeds, False otherwise.
9343         """
9344
9345         global _can_poll_device
9346         if _can_poll_device is not None:
9347                 return _can_poll_device
9348
9349         if not hasattr(select, "poll"):
9350                 _can_poll_device = False
9351                 return _can_poll_device
9352
9353         try:
9354                 dev_null = open('/dev/null', 'rb')
9355         except IOError:
9356                 _can_poll_device = False
9357                 return _can_poll_device
9358
9359         p = select.poll()
9360         p.register(dev_null.fileno(), PollConstants.POLLIN)
9361
9362         invalid_request = False
9363         for f, event in p.poll():
9364                 if event & PollConstants.POLLNVAL:
9365                         invalid_request = True
9366                         break
9367         dev_null.close()
9368
9369         _can_poll_device = not invalid_request
9370         return _can_poll_device
9371
9372 def create_poll_instance():
9373         """
9374         Create an instance of select.poll, or an instance of
9375         PollSelectAdapter there is no poll() implementation or
9376         it is broken somehow.
9377         """
9378         if can_poll_device():
9379                 return select.poll()
9380         return PollSelectAdapter()
9381
9382 getloadavg = getattr(os, "getloadavg", None)
9383 if getloadavg is None:
9384         def getloadavg():
9385                 """
9386                 Uses /proc/loadavg to emulate os.getloadavg().
9387                 Raises OSError if the load average was unobtainable.
9388                 """
9389                 try:
9390                         loadavg_str = open('/proc/loadavg').readline()
9391                 except IOError:
9392                         # getloadavg() is only supposed to raise OSError, so convert
9393                         raise OSError('unknown')
9394                 loadavg_split = loadavg_str.split()
9395                 if len(loadavg_split) < 3:
9396                         raise OSError('unknown')
9397                 loadavg_floats = []
9398                 for i in xrange(3):
9399                         try:
9400                                 loadavg_floats.append(float(loadavg_split[i]))
9401                         except ValueError:
9402                                 raise OSError('unknown')
9403                 return tuple(loadavg_floats)
9404
9405 class PollScheduler(object):
9406
9407         class _sched_iface_class(SlotObject):
9408                 __slots__ = ("register", "schedule", "unregister")
9409
9410         def __init__(self):
9411                 self._max_jobs = 1
9412                 self._max_load = None
9413                 self._jobs = 0
9414                 self._poll_event_queue = []
9415                 self._poll_event_handlers = {}
9416                 self._poll_event_handler_ids = {}
9417                 # Increment id for each new handler.
9418                 self._event_handler_id = 0
9419                 self._poll_obj = create_poll_instance()
9420                 self._scheduling = False
9421
9422         def _schedule(self):
9423                 """
9424                 Calls _schedule_tasks() and automatically returns early from
9425                 any recursive calls to this method that the _schedule_tasks()
9426                 call might trigger. This makes _schedule() safe to call from
9427                 inside exit listeners.
9428                 """
9429                 if self._scheduling:
9430                         return False
9431                 self._scheduling = True
9432                 try:
9433                         return self._schedule_tasks()
9434                 finally:
9435                         self._scheduling = False
9436
9437         def _running_job_count(self):
9438                 return self._jobs
9439
9440         def _can_add_job(self):
9441                 max_jobs = self._max_jobs
9442                 max_load = self._max_load
9443
9444                 if self._max_jobs is not True and \
9445                         self._running_job_count() >= self._max_jobs:
9446                         return False
9447
9448                 if max_load is not None and \
9449                         (max_jobs is True or max_jobs > 1) and \
9450                         self._running_job_count() >= 1:
9451                         try:
9452                                 avg1, avg5, avg15 = getloadavg()
9453                         except OSError:
9454                                 return False
9455
9456                         if avg1 >= max_load:
9457                                 return False
9458
9459                 return True
9460
9461         def _poll(self, timeout=None):
9462                 """
9463                 All poll() calls pass through here. The poll events
9464                 are added directly to self._poll_event_queue.
9465                 In order to avoid endless blocking, this raises
9466                 StopIteration if timeout is None and there are
9467                 no file descriptors to poll.
9468                 """
9469                 if not self._poll_event_handlers:
9470                         self._schedule()
9471                         if timeout is None and \
9472                                 not self._poll_event_handlers:
9473                                 raise StopIteration(
9474                                         "timeout is None and there are no poll() event handlers")
9475
9476                 # The following error is known to occur with Linux kernel versions
9477                 # less than 2.6.24:
9478                 #
9479                 #   select.error: (4, 'Interrupted system call')
9480                 #
9481                 # This error has been observed after a SIGSTOP, followed by SIGCONT.
9482                 # Treat it similar to EAGAIN if timeout is None, otherwise just return
9483                 # without any events.
9484                 while True:
9485                         try:
9486                                 self._poll_event_queue.extend(self._poll_obj.poll(timeout))
9487                                 break
9488                         except select.error, e:
9489                                 writemsg_level("\n!!! select error: %s\n" % (e,),
9490                                         level=logging.ERROR, noiselevel=-1)
9491                                 del e
9492                                 if timeout is not None:
9493                                         break
9494
9495         def _next_poll_event(self, timeout=None):
9496                 """
9497                 Since the _schedule_wait() loop is called by event
9498                 handlers from _poll_loop(), maintain a central event
9499                 queue for both of them to share events from a single
9500                 poll() call. In order to avoid endless blocking, this
9501                 raises StopIteration if timeout is None and there are
9502                 no file descriptors to poll.
9503                 """
9504                 if not self._poll_event_queue:
9505                         self._poll(timeout)
9506                 return self._poll_event_queue.pop()
9507
9508         def _poll_loop(self):
9509
9510                 event_handlers = self._poll_event_handlers
9511                 event_handled = False
9512
9513                 try:
9514                         while event_handlers:
9515                                 f, event = self._next_poll_event()
9516                                 handler, reg_id = event_handlers[f]
9517                                 handler(f, event)
9518                                 event_handled = True
9519                 except StopIteration:
9520                         event_handled = True
9521
9522                 if not event_handled:
9523                         raise AssertionError("tight loop")
9524
9525         def _schedule_yield(self):
9526                 """
9527                 Schedule for a short period of time chosen by the scheduler based
9528                 on internal state. Synchronous tasks should call this periodically
9529                 in order to allow the scheduler to service pending poll events. The
9530                 scheduler will call poll() exactly once, without blocking, and any
9531                 resulting poll events will be serviced.
9532                 """
9533                 event_handlers = self._poll_event_handlers
9534                 events_handled = 0
9535
9536                 if not event_handlers:
9537                         return bool(events_handled)
9538
9539                 if not self._poll_event_queue:
9540                         self._poll(0)
9541
9542                 try:
9543                         while event_handlers and self._poll_event_queue:
9544                                 f, event = self._next_poll_event()
9545                                 handler, reg_id = event_handlers[f]
9546                                 handler(f, event)
9547                                 events_handled += 1
9548                 except StopIteration:
9549                         events_handled += 1
9550
9551                 return bool(events_handled)
9552
9553         def _register(self, f, eventmask, handler):
9554                 """
9555                 @rtype: Integer
9556                 @return: A unique registration id, for use in schedule() or
9557                         unregister() calls.
9558                 """
9559                 if f in self._poll_event_handlers:
9560                         raise AssertionError("fd %d is already registered" % f)
9561                 self._event_handler_id += 1
9562                 reg_id = self._event_handler_id
9563                 self._poll_event_handler_ids[reg_id] = f
9564                 self._poll_event_handlers[f] = (handler, reg_id)
9565                 self._poll_obj.register(f, eventmask)
9566                 return reg_id
9567
9568         def _unregister(self, reg_id):
9569                 f = self._poll_event_handler_ids[reg_id]
9570                 self._poll_obj.unregister(f)
9571                 del self._poll_event_handlers[f]
9572                 del self._poll_event_handler_ids[reg_id]
9573
9574         def _schedule_wait(self, wait_ids):
9575                 """
9576                 Schedule until wait_id is not longer registered
9577                 for poll() events.
9578                 @type wait_id: int
9579                 @param wait_id: a task id to wait for
9580                 """
9581                 event_handlers = self._poll_event_handlers
9582                 handler_ids = self._poll_event_handler_ids
9583                 event_handled = False
9584
9585                 if isinstance(wait_ids, int):
9586                         wait_ids = frozenset([wait_ids])
9587
9588                 try:
9589                         while wait_ids.intersection(handler_ids):
9590                                 f, event = self._next_poll_event()
9591                                 handler, reg_id = event_handlers[f]
9592                                 handler(f, event)
9593                                 event_handled = True
9594                 except StopIteration:
9595                         event_handled = True
9596
9597                 return event_handled
9598
9599 class QueueScheduler(PollScheduler):
9600
9601         """
9602         Add instances of SequentialTaskQueue and then call run(). The
9603         run() method returns when no tasks remain.
9604         """
9605
9606         def __init__(self, max_jobs=None, max_load=None):
9607                 PollScheduler.__init__(self)
9608
9609                 if max_jobs is None:
9610                         max_jobs = 1
9611
9612                 self._max_jobs = max_jobs
9613                 self._max_load = max_load
9614                 self.sched_iface = self._sched_iface_class(
9615                         register=self._register,
9616                         schedule=self._schedule_wait,
9617                         unregister=self._unregister)
9618
9619                 self._queues = []
9620                 self._schedule_listeners = []
9621
9622         def add(self, q):
9623                 self._queues.append(q)
9624
9625         def remove(self, q):
9626                 self._queues.remove(q)
9627
9628         def run(self):
9629
9630                 while self._schedule():
9631                         self._poll_loop()
9632
9633                 while self._running_job_count():
9634                         self._poll_loop()
9635
9636         def _schedule_tasks(self):
9637                 """
9638                 @rtype: bool
9639                 @returns: True if there may be remaining tasks to schedule,
9640                         False otherwise.
9641                 """
9642                 while self._can_add_job():
9643                         n = self._max_jobs - self._running_job_count()
9644                         if n < 1:
9645                                 break
9646
9647                         if not self._start_next_job(n):
9648                                 return False
9649
9650                 for q in self._queues:
9651                         if q:
9652                                 return True
9653                 return False
9654
9655         def _running_job_count(self):
9656                 job_count = 0
9657                 for q in self._queues:
9658                         job_count += len(q.running_tasks)
9659                 self._jobs = job_count
9660                 return job_count
9661
9662         def _start_next_job(self, n=1):
9663                 started_count = 0
9664                 for q in self._queues:
9665                         initial_job_count = len(q.running_tasks)
9666                         q.schedule()
9667                         final_job_count = len(q.running_tasks)
9668                         if final_job_count > initial_job_count:
9669                                 started_count += (final_job_count - initial_job_count)
9670                         if started_count >= n:
9671                                 break
9672                 return started_count
9673
9674 class TaskScheduler(object):
9675
9676         """
9677         A simple way to handle scheduling of AsynchrousTask instances. Simply
9678         add tasks and call run(). The run() method returns when no tasks remain.
9679         """
9680
9681         def __init__(self, max_jobs=None, max_load=None):
9682                 self._queue = SequentialTaskQueue(max_jobs=max_jobs)
9683                 self._scheduler = QueueScheduler(
9684                         max_jobs=max_jobs, max_load=max_load)
9685                 self.sched_iface = self._scheduler.sched_iface
9686                 self.run = self._scheduler.run
9687                 self._scheduler.add(self._queue)
9688
9689         def add(self, task):
9690                 self._queue.add(task)
9691
9692 class JobStatusDisplay(object):
9693
9694         _bound_properties = ("curval", "failed", "running")
9695         _jobs_column_width = 48
9696
9697         # Don't update the display unless at least this much
9698         # time has passed, in units of seconds.
9699         _min_display_latency = 2
9700
9701         _default_term_codes = {
9702                 'cr'  : '\r',
9703                 'el'  : '\x1b[K',
9704                 'nel' : '\n',
9705         }
9706
9707         _termcap_name_map = {
9708                 'carriage_return' : 'cr',
9709                 'clr_eol'         : 'el',
9710                 'newline'         : 'nel',
9711         }
9712
9713         def __init__(self, out=sys.stdout, quiet=False):
9714                 object.__setattr__(self, "out", out)
9715                 object.__setattr__(self, "quiet", quiet)
9716                 object.__setattr__(self, "maxval", 0)
9717                 object.__setattr__(self, "merges", 0)
9718                 object.__setattr__(self, "_changed", False)
9719                 object.__setattr__(self, "_displayed", False)
9720                 object.__setattr__(self, "_last_display_time", 0)
9721                 object.__setattr__(self, "width", 80)
9722                 self.reset()
9723
9724                 isatty = hasattr(out, "isatty") and out.isatty()
9725                 object.__setattr__(self, "_isatty", isatty)
9726                 if not isatty or not self._init_term():
9727                         term_codes = {}
9728                         for k, capname in self._termcap_name_map.iteritems():
9729                                 term_codes[k] = self._default_term_codes[capname]
9730                         object.__setattr__(self, "_term_codes", term_codes)
9731                 encoding = sys.getdefaultencoding()
9732                 for k, v in self._term_codes.items():
9733                         if not isinstance(v, basestring):
9734                                 self._term_codes[k] = v.decode(encoding, 'replace')
9735
9736         def _init_term(self):
9737                 """
9738                 Initialize term control codes.
9739                 @rtype: bool
9740                 @returns: True if term codes were successfully initialized,
9741                         False otherwise.
9742                 """
9743
9744                 term_type = os.environ.get("TERM", "vt100")
9745                 tigetstr = None
9746
9747                 try:
9748                         import curses
9749                         try:
9750                                 curses.setupterm(term_type, self.out.fileno())
9751                                 tigetstr = curses.tigetstr
9752                         except curses.error:
9753                                 pass
9754                 except ImportError:
9755                         pass
9756
9757                 if tigetstr is None:
9758                         return False
9759
9760                 term_codes = {}
9761                 for k, capname in self._termcap_name_map.iteritems():
9762                         code = tigetstr(capname)
9763                         if code is None:
9764                                 code = self._default_term_codes[capname]
9765                         term_codes[k] = code
9766                 object.__setattr__(self, "_term_codes", term_codes)
9767                 return True
9768
9769         def _format_msg(self, msg):
9770                 return ">>> %s" % msg
9771
9772         def _erase(self):
9773                 self.out.write(
9774                         self._term_codes['carriage_return'] + \
9775                         self._term_codes['clr_eol'])
9776                 self.out.flush()
9777                 self._displayed = False
9778
9779         def _display(self, line):
9780                 self.out.write(line)
9781                 self.out.flush()
9782                 self._displayed = True
9783
9784         def _update(self, msg):
9785
9786                 out = self.out
9787                 if not self._isatty:
9788                         out.write(self._format_msg(msg) + self._term_codes['newline'])
9789                         self.out.flush()
9790                         self._displayed = True
9791                         return
9792
9793                 if self._displayed:
9794                         self._erase()
9795
9796                 self._display(self._format_msg(msg))
9797
9798         def displayMessage(self, msg):
9799
9800                 was_displayed = self._displayed
9801
9802                 if self._isatty and self._displayed:
9803                         self._erase()
9804
9805                 self.out.write(self._format_msg(msg) + self._term_codes['newline'])
9806                 self.out.flush()
9807                 self._displayed = False
9808
9809                 if was_displayed:
9810                         self._changed = True
9811                         self.display()
9812
9813         def reset(self):
9814                 self.maxval = 0
9815                 self.merges = 0
9816                 for name in self._bound_properties:
9817                         object.__setattr__(self, name, 0)
9818
9819                 if self._displayed:
9820                         self.out.write(self._term_codes['newline'])
9821                         self.out.flush()
9822                         self._displayed = False
9823
9824         def __setattr__(self, name, value):
9825                 old_value = getattr(self, name)
9826                 if value == old_value:
9827                         return
9828                 object.__setattr__(self, name, value)
9829                 if name in self._bound_properties:
9830                         self._property_change(name, old_value, value)
9831
9832         def _property_change(self, name, old_value, new_value):
9833                 self._changed = True
9834                 self.display()
9835
9836         def _load_avg_str(self):
9837                 try:
9838                         avg = getloadavg()
9839                 except OSError:
9840                         return 'unknown'
9841
9842                 max_avg = max(avg)
9843
9844                 if max_avg < 10:
9845                         digits = 2
9846                 elif max_avg < 100:
9847                         digits = 1
9848                 else:
9849                         digits = 0
9850
9851                 return ", ".join(("%%.%df" % digits ) % x for x in avg)
9852
9853         def display(self):
9854                 """
9855                 Display status on stdout, but only if something has
9856                 changed since the last call.
9857                 """
9858
9859                 if self.quiet:
9860                         return
9861
9862                 current_time = time.time()
9863                 time_delta = current_time - self._last_display_time
9864                 if self._displayed and \
9865                         not self._changed:
9866                         if not self._isatty:
9867                                 return
9868                         if time_delta < self._min_display_latency:
9869                                 return
9870
9871                 self._last_display_time = current_time
9872                 self._changed = False
9873                 self._display_status()
9874
9875         def _display_status(self):
9876                 # Don't use len(self._completed_tasks) here since that also
9877                 # can include uninstall tasks.
9878                 curval_str = str(self.curval)
9879                 maxval_str = str(self.maxval)
9880                 running_str = str(self.running)
9881                 failed_str = str(self.failed)
9882                 load_avg_str = self._load_avg_str()
9883
9884                 color_output = StringIO()
9885                 plain_output = StringIO()
9886                 style_file = portage.output.ConsoleStyleFile(color_output)
9887                 style_file.write_listener = plain_output
9888                 style_writer = portage.output.StyleWriter(file=style_file, maxcol=9999)
9889                 style_writer.style_listener = style_file.new_styles
9890                 f = formatter.AbstractFormatter(style_writer)
9891
9892                 number_style = "INFORM"
9893                 f.add_literal_data("Jobs: ")
9894                 f.push_style(number_style)
9895                 f.add_literal_data(curval_str)
9896                 f.pop_style()
9897                 f.add_literal_data(" of ")
9898                 f.push_style(number_style)
9899                 f.add_literal_data(maxval_str)
9900                 f.pop_style()
9901                 f.add_literal_data(" complete")
9902
9903                 if self.running:
9904                         f.add_literal_data(", ")
9905                         f.push_style(number_style)
9906                         f.add_literal_data(running_str)
9907                         f.pop_style()
9908                         f.add_literal_data(" running")
9909
9910                 if self.failed:
9911                         f.add_literal_data(", ")
9912                         f.push_style(number_style)
9913                         f.add_literal_data(failed_str)
9914                         f.pop_style()
9915                         f.add_literal_data(" failed")
9916
9917                 padding = self._jobs_column_width - len(plain_output.getvalue())
9918                 if padding > 0:
9919                         f.add_literal_data(padding * " ")
9920
9921                 f.add_literal_data("Load avg: ")
9922                 f.add_literal_data(load_avg_str)
9923
9924                 # Truncate to fit width, to avoid making the terminal scroll if the
9925                 # line overflows (happens when the load average is large).
9926                 plain_output = plain_output.getvalue()
9927                 if self._isatty and len(plain_output) > self.width:
9928                         # Use plain_output here since it's easier to truncate
9929                         # properly than the color output which contains console
9930                         # color codes.
9931                         self._update(plain_output[:self.width])
9932                 else:
9933                         self._update(color_output.getvalue())
9934
9935                 xtermTitle(" ".join(plain_output.split()))
9936
9937 class Scheduler(PollScheduler):
9938
9939         _opts_ignore_blockers = \
9940                 frozenset(["--buildpkgonly",
9941                 "--fetchonly", "--fetch-all-uri",
9942                 "--nodeps", "--pretend"])
9943
9944         _opts_no_background = \
9945                 frozenset(["--pretend",
9946                 "--fetchonly", "--fetch-all-uri"])
9947
9948         _opts_no_restart = frozenset(["--buildpkgonly",
9949                 "--fetchonly", "--fetch-all-uri", "--pretend"])
9950
9951         _bad_resume_opts = set(["--ask", "--changelog",
9952                 "--resume", "--skipfirst"])
9953
9954         _fetch_log = "/var/log/emerge-fetch.log"
9955
9956         class _iface_class(SlotObject):
9957                 __slots__ = ("dblinkEbuildPhase", "dblinkDisplayMerge",
9958                         "dblinkElog", "fetch", "register", "schedule",
9959                         "scheduleSetup", "scheduleUnpack", "scheduleYield",
9960                         "unregister")
9961
9962         class _fetch_iface_class(SlotObject):
9963                 __slots__ = ("log_file", "schedule")
9964
9965         _task_queues_class = slot_dict_class(
9966                 ("merge", "jobs", "fetch", "unpack"), prefix="")
9967
9968         class _build_opts_class(SlotObject):
9969                 __slots__ = ("buildpkg", "buildpkgonly",
9970                         "fetch_all_uri", "fetchonly", "pretend")
9971
9972         class _binpkg_opts_class(SlotObject):
9973                 __slots__ = ("fetchonly", "getbinpkg", "pretend")
9974
9975         class _pkg_count_class(SlotObject):
9976                 __slots__ = ("curval", "maxval")
9977
9978         class _emerge_log_class(SlotObject):
9979                 __slots__ = ("xterm_titles",)
9980
9981                 def log(self, *pargs, **kwargs):
9982                         if not self.xterm_titles:
9983                                 # Avoid interference with the scheduler's status display.
9984                                 kwargs.pop("short_msg", None)
9985                         emergelog(self.xterm_titles, *pargs, **kwargs)
9986
9987         class _failed_pkg(SlotObject):
9988                 __slots__ = ("build_dir", "build_log", "pkg", "returncode")
9989
9990         class _ConfigPool(object):
9991                 """Interface for a task to temporarily allocate a config
9992                 instance from a pool. This allows a task to be constructed
9993                 long before the config instance actually becomes needed, like
9994                 when prefetchers are constructed for the whole merge list."""
9995                 __slots__ = ("_root", "_allocate", "_deallocate")
9996                 def __init__(self, root, allocate, deallocate):
9997                         self._root = root
9998                         self._allocate = allocate
9999                         self._deallocate = deallocate
10000                 def allocate(self):
10001                         return self._allocate(self._root)
10002                 def deallocate(self, settings):
10003                         self._deallocate(settings)
10004
10005         class _unknown_internal_error(portage.exception.PortageException):
10006                 """
10007                 Used internally to terminate scheduling. The specific reason for
10008                 the failure should have been dumped to stderr.
10009                 """
10010                 def __init__(self, value=""):
10011                         portage.exception.PortageException.__init__(self, value)
10012
10013         def __init__(self, settings, trees, mtimedb, myopts,
10014                 spinner, mergelist, favorites, digraph):
10015                 PollScheduler.__init__(self)
10016                 self.settings = settings
10017                 self.target_root = settings["ROOT"]
10018                 self.trees = trees
10019                 self.myopts = myopts
10020                 self._spinner = spinner
10021                 self._mtimedb = mtimedb
10022                 self._mergelist = mergelist
10023                 self._favorites = favorites
10024                 self._args_set = InternalPackageSet(favorites)
10025                 self._build_opts = self._build_opts_class()
10026                 for k in self._build_opts.__slots__:
10027                         setattr(self._build_opts, k, "--" + k.replace("_", "-") in myopts)
10028                 self._binpkg_opts = self._binpkg_opts_class()
10029                 for k in self._binpkg_opts.__slots__:
10030                         setattr(self._binpkg_opts, k, "--" + k.replace("_", "-") in myopts)
10031
10032                 self.curval = 0
10033                 self._logger = self._emerge_log_class()
10034                 self._task_queues = self._task_queues_class()
10035                 for k in self._task_queues.allowed_keys:
10036                         setattr(self._task_queues, k,
10037                                 SequentialTaskQueue())
10038
10039                 # Holds merges that will wait to be executed when no builds are
10040                 # executing. This is useful for system packages since dependencies
10041                 # on system packages are frequently unspecified.
10042                 self._merge_wait_queue = []
10043                 # Holds merges that have been transfered from the merge_wait_queue to
10044                 # the actual merge queue. They are removed from this list upon
10045                 # completion. Other packages can start building only when this list is
10046                 # empty.
10047                 self._merge_wait_scheduled = []
10048
10049                 # Holds system packages and their deep runtime dependencies. Before
10050                 # being merged, these packages go to merge_wait_queue, to be merged
10051                 # when no other packages are building.
10052                 self._deep_system_deps = set()
10053
10054                 # Holds packages to merge which will satisfy currently unsatisfied
10055                 # deep runtime dependencies of system packages. If this is not empty
10056                 # then no parallel builds will be spawned until it is empty. This
10057                 # minimizes the possibility that a build will fail due to the system
10058                 # being in a fragile state. For example, see bug #259954.
10059                 self._unsatisfied_system_deps = set()
10060
10061                 self._status_display = JobStatusDisplay()
10062                 self._max_load = myopts.get("--load-average")
10063                 max_jobs = myopts.get("--jobs")
10064                 if max_jobs is None:
10065                         max_jobs = 1
10066                 self._set_max_jobs(max_jobs)
10067
10068                 # The root where the currently running
10069                 # portage instance is installed.
10070                 self._running_root = trees["/"]["root_config"]
10071                 self.edebug = 0
10072                 if settings.get("PORTAGE_DEBUG", "") == "1":
10073                         self.edebug = 1
10074                 self.pkgsettings = {}
10075                 self._config_pool = {}
10076                 self._blocker_db = {}
10077                 for root in trees:
10078                         self._config_pool[root] = []
10079                         self._blocker_db[root] = BlockerDB(trees[root]["root_config"])
10080
10081                 fetch_iface = self._fetch_iface_class(log_file=self._fetch_log,
10082                         schedule=self._schedule_fetch)
10083                 self._sched_iface = self._iface_class(
10084                         dblinkEbuildPhase=self._dblink_ebuild_phase,
10085                         dblinkDisplayMerge=self._dblink_display_merge,
10086                         dblinkElog=self._dblink_elog,
10087                         fetch=fetch_iface, register=self._register,
10088                         schedule=self._schedule_wait,
10089                         scheduleSetup=self._schedule_setup,
10090                         scheduleUnpack=self._schedule_unpack,
10091                         scheduleYield=self._schedule_yield,
10092                         unregister=self._unregister)
10093
10094                 self._prefetchers = weakref.WeakValueDictionary()
10095                 self._pkg_queue = []
10096                 self._completed_tasks = set()
10097
10098                 self._failed_pkgs = []
10099                 self._failed_pkgs_all = []
10100                 self._failed_pkgs_die_msgs = []
10101                 self._post_mod_echo_msgs = []
10102                 self._parallel_fetch = False
10103                 merge_count = len([x for x in mergelist \
10104                         if isinstance(x, Package) and x.operation == "merge"])
10105                 self._pkg_count = self._pkg_count_class(
10106                         curval=0, maxval=merge_count)
10107                 self._status_display.maxval = self._pkg_count.maxval
10108
10109                 # The load average takes some time to respond when new
10110                 # jobs are added, so we need to limit the rate of adding
10111                 # new jobs.
10112                 self._job_delay_max = 10
10113                 self._job_delay_factor = 1.0
10114                 self._job_delay_exp = 1.5
10115                 self._previous_job_start_time = None
10116
10117                 self._set_digraph(digraph)
10118
10119                 # This is used to memoize the _choose_pkg() result when
10120                 # no packages can be chosen until one of the existing
10121                 # jobs completes.
10122                 self._choose_pkg_return_early = False
10123
10124                 features = self.settings.features
10125                 if "parallel-fetch" in features and \
10126                         not ("--pretend" in self.myopts or \
10127                         "--fetch-all-uri" in self.myopts or \
10128                         "--fetchonly" in self.myopts):
10129                         if "distlocks" not in features:
10130                                 portage.writemsg(red("!!!")+"\n", noiselevel=-1)
10131                                 portage.writemsg(red("!!!")+" parallel-fetching " + \
10132                                         "requires the distlocks feature enabled"+"\n",
10133                                         noiselevel=-1)
10134                                 portage.writemsg(red("!!!")+" you have it disabled, " + \
10135                                         "thus parallel-fetching is being disabled"+"\n",
10136                                         noiselevel=-1)
10137                                 portage.writemsg(red("!!!")+"\n", noiselevel=-1)
10138                         elif len(mergelist) > 1:
10139                                 self._parallel_fetch = True
10140
10141                 if self._parallel_fetch:
10142                                 # clear out existing fetch log if it exists
10143                                 try:
10144                                         open(self._fetch_log, 'w')
10145                                 except EnvironmentError:
10146                                         pass
10147
10148                 self._running_portage = None
10149                 portage_match = self._running_root.trees["vartree"].dbapi.match(
10150                         portage.const.PORTAGE_PACKAGE_ATOM)
10151                 if portage_match:
10152                         cpv = portage_match.pop()
10153                         self._running_portage = self._pkg(cpv, "installed",
10154                                 self._running_root, installed=True)
10155
10156         def _poll(self, timeout=None):
10157                 self._schedule()
10158                 PollScheduler._poll(self, timeout=timeout)
10159
10160         def _set_max_jobs(self, max_jobs):
10161                 self._max_jobs = max_jobs
10162                 self._task_queues.jobs.max_jobs = max_jobs
10163
10164         def _background_mode(self):
10165                 """
10166                 Check if background mode is enabled and adjust states as necessary.
10167
10168                 @rtype: bool
10169                 @returns: True if background mode is enabled, False otherwise.
10170                 """
10171                 background = (self._max_jobs is True or \
10172                         self._max_jobs > 1 or "--quiet" in self.myopts) and \
10173                         not bool(self._opts_no_background.intersection(self.myopts))
10174
10175                 if background:
10176                         interactive_tasks = self._get_interactive_tasks()
10177                         if interactive_tasks:
10178                                 background = False
10179                                 writemsg_level(">>> Sending package output to stdio due " + \
10180                                         "to interactive package(s):\n",
10181                                         level=logging.INFO, noiselevel=-1)
10182                                 msg = [""]
10183                                 for pkg in interactive_tasks:
10184                                         pkg_str = "  " + colorize("INFORM", str(pkg.cpv))
10185                                         if pkg.root != "/":
10186                                                 pkg_str += " for " + pkg.root
10187                                         msg.append(pkg_str)
10188                                 msg.append("")
10189                                 writemsg_level("".join("%s\n" % (l,) for l in msg),
10190                                         level=logging.INFO, noiselevel=-1)
10191                                 if self._max_jobs is True or self._max_jobs > 1:
10192                                         self._set_max_jobs(1)
10193                                         writemsg_level(">>> Setting --jobs=1 due " + \
10194                                                 "to the above interactive package(s)\n",
10195                                                 level=logging.INFO, noiselevel=-1)
10196
10197                 self._status_display.quiet = \
10198                         not background or \
10199                         ("--quiet" in self.myopts and \
10200                         "--verbose" not in self.myopts)
10201
10202                 self._logger.xterm_titles = \
10203                         "notitles" not in self.settings.features and \
10204                         self._status_display.quiet
10205
10206                 return background
10207
10208         def _get_interactive_tasks(self):
10209                 from portage import flatten
10210                 from portage.dep import use_reduce, paren_reduce
10211                 interactive_tasks = []
10212                 for task in self._mergelist:
10213                         if not (isinstance(task, Package) and \
10214                                 task.operation == "merge"):
10215                                 continue
10216                         try:
10217                                 properties = flatten(use_reduce(paren_reduce(
10218                                         task.metadata["PROPERTIES"]), uselist=task.use.enabled))
10219                         except portage.exception.InvalidDependString, e:
10220                                 show_invalid_depstring_notice(task,
10221                                         task.metadata["PROPERTIES"], str(e))
10222                                 raise self._unknown_internal_error()
10223                         if "interactive" in properties:
10224                                 interactive_tasks.append(task)
10225                 return interactive_tasks
10226
10227         def _set_digraph(self, digraph):
10228                 if "--nodeps" in self.myopts or \
10229                         (self._max_jobs is not True and self._max_jobs < 2):
10230                         # save some memory
10231                         self._digraph = None
10232                         return
10233
10234                 self._digraph = digraph
10235                 self._find_system_deps()
10236                 self._prune_digraph()
10237                 self._prevent_builddir_collisions()
10238
10239         def _find_system_deps(self):
10240                 """
10241                 Find system packages and their deep runtime dependencies. Before being
10242                 merged, these packages go to merge_wait_queue, to be merged when no
10243                 other packages are building.
10244                 """
10245                 deep_system_deps = self._deep_system_deps
10246                 deep_system_deps.clear()
10247                 deep_system_deps.update(
10248                         _find_deep_system_runtime_deps(self._digraph))
10249                 deep_system_deps.difference_update([pkg for pkg in \
10250                         deep_system_deps if pkg.operation != "merge"])
10251
10252         def _prune_digraph(self):
10253                 """
10254                 Prune any root nodes that are irrelevant.
10255                 """
10256
10257                 graph = self._digraph
10258                 completed_tasks = self._completed_tasks
10259                 removed_nodes = set()
10260                 while True:
10261                         for node in graph.root_nodes():
10262                                 if not isinstance(node, Package) or \
10263                                         (node.installed and node.operation == "nomerge") or \
10264                                         node.onlydeps or \
10265                                         node in completed_tasks:
10266                                         removed_nodes.add(node)
10267                         if removed_nodes:
10268                                 graph.difference_update(removed_nodes)
10269                         if not removed_nodes:
10270                                 break
10271                         removed_nodes.clear()
10272
10273         def _prevent_builddir_collisions(self):
10274                 """
10275                 When building stages, sometimes the same exact cpv needs to be merged
10276                 to both $ROOTs. Add edges to the digraph in order to avoid collisions
10277                 in the builddir. Currently, normal file locks would be inappropriate
10278                 for this purpose since emerge holds all of it's build dir locks from
10279                 the main process.
10280                 """
10281                 cpv_map = {}
10282                 for pkg in self._mergelist:
10283                         if not isinstance(pkg, Package):
10284                                 # a satisfied blocker
10285                                 continue
10286                         if pkg.installed:
10287                                 continue
10288                         if pkg.cpv not in cpv_map:
10289                                 cpv_map[pkg.cpv] = [pkg]
10290                                 continue
10291                         for earlier_pkg in cpv_map[pkg.cpv]:
10292                                 self._digraph.add(earlier_pkg, pkg,
10293                                         priority=DepPriority(buildtime=True))
10294                         cpv_map[pkg.cpv].append(pkg)
10295
10296         class _pkg_failure(portage.exception.PortageException):
10297                 """
10298                 An instance of this class is raised by unmerge() when
10299                 an uninstallation fails.
10300                 """
10301                 status = 1
10302                 def __init__(self, *pargs):
10303                         portage.exception.PortageException.__init__(self, pargs)
10304                         if pargs:
10305                                 self.status = pargs[0]
10306
10307         def _schedule_fetch(self, fetcher):
10308                 """
10309                 Schedule a fetcher on the fetch queue, in order to
10310                 serialize access to the fetch log.
10311                 """
10312                 self._task_queues.fetch.addFront(fetcher)
10313
10314         def _schedule_setup(self, setup_phase):
10315                 """
10316                 Schedule a setup phase on the merge queue, in order to
10317                 serialize unsandboxed access to the live filesystem.
10318                 """
10319                 self._task_queues.merge.addFront(setup_phase)
10320                 self._schedule()
10321
10322         def _schedule_unpack(self, unpack_phase):
10323                 """
10324                 Schedule an unpack phase on the unpack queue, in order
10325                 to serialize $DISTDIR access for live ebuilds.
10326                 """
10327                 self._task_queues.unpack.add(unpack_phase)
10328
10329         def _find_blockers(self, new_pkg):
10330                 """
10331                 Returns a callable which should be called only when
10332                 the vdb lock has been acquired.
10333                 """
10334                 def get_blockers():
10335                         return self._find_blockers_with_lock(new_pkg, acquire_lock=0)
10336                 return get_blockers
10337
10338         def _find_blockers_with_lock(self, new_pkg, acquire_lock=0):
10339                 if self._opts_ignore_blockers.intersection(self.myopts):
10340                         return None
10341
10342                 # Call gc.collect() here to avoid heap overflow that
10343                 # triggers 'Cannot allocate memory' errors (reported
10344                 # with python-2.5).
10345                 import gc
10346                 gc.collect()
10347
10348                 blocker_db = self._blocker_db[new_pkg.root]
10349
10350                 blocker_dblinks = []
10351                 for blocking_pkg in blocker_db.findInstalledBlockers(
10352                         new_pkg, acquire_lock=acquire_lock):
10353                         if new_pkg.slot_atom == blocking_pkg.slot_atom:
10354                                 continue
10355                         if new_pkg.cpv == blocking_pkg.cpv:
10356                                 continue
10357                         blocker_dblinks.append(portage.dblink(
10358                                 blocking_pkg.category, blocking_pkg.pf, blocking_pkg.root,
10359                                 self.pkgsettings[blocking_pkg.root], treetype="vartree",
10360                                 vartree=self.trees[blocking_pkg.root]["vartree"]))
10361
10362                 gc.collect()
10363
10364                 return blocker_dblinks
10365
10366         def _dblink_pkg(self, pkg_dblink):
10367                 cpv = pkg_dblink.mycpv
10368                 type_name = RootConfig.tree_pkg_map[pkg_dblink.treetype]
10369                 root_config = self.trees[pkg_dblink.myroot]["root_config"]
10370                 installed = type_name == "installed"
10371                 return self._pkg(cpv, type_name, root_config, installed=installed)
10372
10373         def _append_to_log_path(self, log_path, msg):
10374                 f = open(log_path, 'a')
10375                 try:
10376                         f.write(msg)
10377                 finally:
10378                         f.close()
10379
10380         def _dblink_elog(self, pkg_dblink, phase, func, msgs):
10381
10382                 log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
10383                 log_file = None
10384                 out = sys.stdout
10385                 background = self._background
10386
10387                 if background and log_path is not None:
10388                         log_file = open(log_path, 'a')
10389                         out = log_file
10390
10391                 try:
10392                         for msg in msgs:
10393                                 func(msg, phase=phase, key=pkg_dblink.mycpv, out=out)
10394                 finally:
10395                         if log_file is not None:
10396                                 log_file.close()
10397
10398         def _dblink_display_merge(self, pkg_dblink, msg, level=0, noiselevel=0):
10399                 log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
10400                 background = self._background
10401
10402                 if log_path is None:
10403                         if not (background and level < logging.WARN):
10404                                 portage.util.writemsg_level(msg,
10405                                         level=level, noiselevel=noiselevel)
10406                 else:
10407                         if not background:
10408                                 portage.util.writemsg_level(msg,
10409                                         level=level, noiselevel=noiselevel)
10410                         self._append_to_log_path(log_path, msg)
10411
10412         def _dblink_ebuild_phase(self,
10413                 pkg_dblink, pkg_dbapi, ebuild_path, phase):
10414                 """
10415                 Using this callback for merge phases allows the scheduler
10416                 to run while these phases execute asynchronously, and allows
10417                 the scheduler control output handling.
10418                 """
10419
10420                 scheduler = self._sched_iface
10421                 settings = pkg_dblink.settings
10422                 pkg = self._dblink_pkg(pkg_dblink)
10423                 background = self._background
10424                 log_path = settings.get("PORTAGE_LOG_FILE")
10425
10426                 ebuild_phase = EbuildPhase(background=background,
10427                         pkg=pkg, phase=phase, scheduler=scheduler,
10428                         settings=settings, tree=pkg_dblink.treetype)
10429                 ebuild_phase.start()
10430                 ebuild_phase.wait()
10431
10432                 return ebuild_phase.returncode
10433
10434         def _generate_digests(self):
10435                 """
10436                 Generate digests if necessary for --digests or FEATURES=digest.
10437                 In order to avoid interference, this must done before parallel
10438                 tasks are started.
10439                 """
10440
10441                 if '--fetchonly' in self.myopts:
10442                         return os.EX_OK
10443
10444                 digest = '--digest' in self.myopts
10445                 if not digest:
10446                         for pkgsettings in self.pkgsettings.itervalues():
10447                                 if 'digest' in pkgsettings.features:
10448                                         digest = True
10449                                         break
10450
10451                 if not digest:
10452                         return os.EX_OK
10453
10454                 for x in self._mergelist:
10455                         if not isinstance(x, Package) or \
10456                                 x.type_name != 'ebuild' or \
10457                                 x.operation != 'merge':
10458                                 continue
10459                         pkgsettings = self.pkgsettings[x.root]
10460                         if '--digest' not in self.myopts and \
10461                                 'digest' not in pkgsettings.features:
10462                                 continue
10463                         portdb = x.root_config.trees['porttree'].dbapi
10464                         ebuild_path = portdb.findname(x.cpv)
10465                         if not ebuild_path:
10466                                 writemsg_level(
10467                                         "!!! Could not locate ebuild for '%s'.\n" \
10468                                         % x.cpv, level=logging.ERROR, noiselevel=-1)
10469                                 return 1
10470                         pkgsettings['O'] = os.path.dirname(ebuild_path)
10471                         if not portage.digestgen([], pkgsettings, myportdb=portdb):
10472                                 writemsg_level(
10473                                         "!!! Unable to generate manifest for '%s'.\n" \
10474                                         % x.cpv, level=logging.ERROR, noiselevel=-1)
10475                                 return 1
10476
10477                 return os.EX_OK
10478
10479         def _check_manifests(self):
10480                 # Verify all the manifests now so that the user is notified of failure
10481                 # as soon as possible.
10482                 if "strict" not in self.settings.features or \
10483                         "--fetchonly" in self.myopts or \
10484                         "--fetch-all-uri" in self.myopts:
10485                         return os.EX_OK
10486
10487                 shown_verifying_msg = False
10488                 quiet_settings = {}
10489                 for myroot, pkgsettings in self.pkgsettings.iteritems():
10490                         quiet_config = portage.config(clone=pkgsettings)
10491                         quiet_config["PORTAGE_QUIET"] = "1"
10492                         quiet_config.backup_changes("PORTAGE_QUIET")
10493                         quiet_settings[myroot] = quiet_config
10494                         del quiet_config
10495
10496                 for x in self._mergelist:
10497                         if not isinstance(x, Package) or \
10498                                 x.type_name != "ebuild":
10499                                 continue
10500
10501                         if not shown_verifying_msg:
10502                                 shown_verifying_msg = True
10503                                 self._status_msg("Verifying ebuild manifests")
10504
10505                         root_config = x.root_config
10506                         portdb = root_config.trees["porttree"].dbapi
10507                         quiet_config = quiet_settings[root_config.root]
10508                         quiet_config["O"] = os.path.dirname(portdb.findname(x.cpv))
10509                         if not portage.digestcheck([], quiet_config, strict=True):
10510                                 return 1
10511
10512                 return os.EX_OK
10513
10514         def _add_prefetchers(self):
10515
10516                 if not self._parallel_fetch:
10517                         return
10518
10519                 if self._parallel_fetch:
10520                         self._status_msg("Starting parallel fetch")
10521
10522                         prefetchers = self._prefetchers
10523                         getbinpkg = "--getbinpkg" in self.myopts
10524
10525                         # In order to avoid "waiting for lock" messages
10526                         # at the beginning, which annoy users, never
10527                         # spawn a prefetcher for the first package.
10528                         for pkg in self._mergelist[1:]:
10529                                 prefetcher = self._create_prefetcher(pkg)
10530                                 if prefetcher is not None:
10531                                         self._task_queues.fetch.add(prefetcher)
10532                                         prefetchers[pkg] = prefetcher
10533
10534         def _create_prefetcher(self, pkg):
10535                 """
10536                 @return: a prefetcher, or None if not applicable
10537                 """
10538                 prefetcher = None
10539
10540                 if not isinstance(pkg, Package):
10541                         pass
10542
10543                 elif pkg.type_name == "ebuild":
10544
10545                         prefetcher = EbuildFetcher(background=True,
10546                                 config_pool=self._ConfigPool(pkg.root,
10547                                 self._allocate_config, self._deallocate_config),
10548                                 fetchonly=1, logfile=self._fetch_log,
10549                                 pkg=pkg, prefetch=True, scheduler=self._sched_iface)
10550
10551                 elif pkg.type_name == "binary" and \
10552                         "--getbinpkg" in self.myopts and \
10553                         pkg.root_config.trees["bintree"].isremote(pkg.cpv):
10554
10555                         prefetcher = BinpkgPrefetcher(background=True,
10556                                 pkg=pkg, scheduler=self._sched_iface)
10557
10558                 return prefetcher
10559
10560         def _is_restart_scheduled(self):
10561                 """
10562                 Check if the merge list contains a replacement
10563                 for the current running instance, that will result
10564                 in restart after merge.
10565                 @rtype: bool
10566                 @returns: True if a restart is scheduled, False otherwise.
10567                 """
10568                 if self._opts_no_restart.intersection(self.myopts):
10569                         return False
10570
10571                 mergelist = self._mergelist
10572
10573                 for i, pkg in enumerate(mergelist):
10574                         if self._is_restart_necessary(pkg) and \
10575                                 i != len(mergelist) - 1:
10576                                 return True
10577
10578                 return False
10579
10580         def _is_restart_necessary(self, pkg):
10581                 """
10582                 @return: True if merging the given package
10583                         requires restart, False otherwise.
10584                 """
10585
10586                 # Figure out if we need a restart.
10587                 if pkg.root == self._running_root.root and \
10588                         portage.match_from_list(
10589                         portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
10590                         if self._running_portage:
10591                                 return pkg.cpv != self._running_portage.cpv
10592                         return True
10593                 return False
10594
10595         def _restart_if_necessary(self, pkg):
10596                 """
10597                 Use execv() to restart emerge. This happens
10598                 if portage upgrades itself and there are
10599                 remaining packages in the list.
10600                 """
10601
10602                 if self._opts_no_restart.intersection(self.myopts):
10603                         return
10604
10605                 if not self._is_restart_necessary(pkg):
10606                         return
10607
10608                 if pkg == self._mergelist[-1]:
10609                         return
10610
10611                 self._main_loop_cleanup()
10612
10613                 logger = self._logger
10614                 pkg_count = self._pkg_count
10615                 mtimedb = self._mtimedb
10616                 bad_resume_opts = self._bad_resume_opts
10617
10618                 logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
10619                         (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
10620
10621                 logger.log(" *** RESTARTING " + \
10622                         "emerge via exec() after change of " + \
10623                         "portage version.")
10624
10625                 mtimedb["resume"]["mergelist"].remove(list(pkg))
10626                 mtimedb.commit()
10627                 portage.run_exitfuncs()
10628                 mynewargv = [sys.argv[0], "--resume"]
10629                 resume_opts = self.myopts.copy()
10630                 # For automatic resume, we need to prevent
10631                 # any of bad_resume_opts from leaking in
10632                 # via EMERGE_DEFAULT_OPTS.
10633                 resume_opts["--ignore-default-opts"] = True
10634                 for myopt, myarg in resume_opts.iteritems():
10635                         if myopt not in bad_resume_opts:
10636                                 if myarg is True:
10637                                         mynewargv.append(myopt)
10638                                 else:
10639                                         mynewargv.append(myopt +"="+ str(myarg))
10640                 # priority only needs to be adjusted on the first run
10641                 os.environ["PORTAGE_NICENESS"] = "0"
10642                 os.execv(mynewargv[0], mynewargv)
10643
10644         def merge(self):
10645
10646                 if "--resume" in self.myopts:
10647                         # We're resuming.
10648                         portage.writemsg_stdout(
10649                                 colorize("GOOD", "*** Resuming merge...\n"), noiselevel=-1)
10650                         self._logger.log(" *** Resuming merge...")
10651
10652                 self._save_resume_list()
10653
10654                 try:
10655                         self._background = self._background_mode()
10656                 except self._unknown_internal_error:
10657                         return 1
10658
10659                 for root in self.trees:
10660                         root_config = self.trees[root]["root_config"]
10661
10662                         # Even for --pretend --fetch mode, PORTAGE_TMPDIR is required
10663                         # since it might spawn pkg_nofetch which requires PORTAGE_BUILDDIR
10664                         # for ensuring sane $PWD (bug #239560) and storing elog messages.
10665                         tmpdir = root_config.settings.get("PORTAGE_TMPDIR", "")
10666                         if not tmpdir or not os.path.isdir(tmpdir):
10667                                 msg = "The directory specified in your " + \
10668                                         "PORTAGE_TMPDIR variable, '%s', " % tmpdir + \
10669                                 "does not exist. Please create this " + \
10670                                 "directory or correct your PORTAGE_TMPDIR setting."
10671                                 msg = textwrap.wrap(msg, 70)
10672                                 out = portage.output.EOutput()
10673                                 for l in msg:
10674                                         out.eerror(l)
10675                                 return 1
10676
10677                         if self._background:
10678                                 root_config.settings.unlock()
10679                                 root_config.settings["PORTAGE_BACKGROUND"] = "1"
10680                                 root_config.settings.backup_changes("PORTAGE_BACKGROUND")
10681                                 root_config.settings.lock()
10682
10683                         self.pkgsettings[root] = portage.config(
10684                                 clone=root_config.settings)
10685
10686                 rval = self._generate_digests()
10687                 if rval != os.EX_OK:
10688                         return rval
10689
10690                 rval = self._check_manifests()
10691                 if rval != os.EX_OK:
10692                         return rval
10693
10694                 keep_going = "--keep-going" in self.myopts
10695                 fetchonly = self._build_opts.fetchonly
10696                 mtimedb = self._mtimedb
10697                 failed_pkgs = self._failed_pkgs
10698
10699                 while True:
10700                         rval = self._merge()
10701                         if rval == os.EX_OK or fetchonly or not keep_going:
10702                                 break
10703                         if "resume" not in mtimedb:
10704                                 break
10705                         mergelist = self._mtimedb["resume"].get("mergelist")
10706                         if not mergelist:
10707                                 break
10708
10709                         if not failed_pkgs:
10710                                 break
10711
10712                         for failed_pkg in failed_pkgs:
10713                                 mergelist.remove(list(failed_pkg.pkg))
10714
10715                         self._failed_pkgs_all.extend(failed_pkgs)
10716                         del failed_pkgs[:]
10717
10718                         if not mergelist:
10719                                 break
10720
10721                         if not self._calc_resume_list():
10722                                 break
10723
10724                         clear_caches(self.trees)
10725                         if not self._mergelist:
10726                                 break
10727
10728                         self._save_resume_list()
10729                         self._pkg_count.curval = 0
10730                         self._pkg_count.maxval = len([x for x in self._mergelist \
10731                                 if isinstance(x, Package) and x.operation == "merge"])
10732                         self._status_display.maxval = self._pkg_count.maxval
10733
10734                 self._logger.log(" *** Finished. Cleaning up...")
10735
10736                 if failed_pkgs:
10737                         self._failed_pkgs_all.extend(failed_pkgs)
10738                         del failed_pkgs[:]
10739
10740                 background = self._background
10741                 failure_log_shown = False
10742                 if background and len(self._failed_pkgs_all) == 1:
10743                         # If only one package failed then just show it's
10744                         # whole log for easy viewing.
10745                         failed_pkg = self._failed_pkgs_all[-1]
10746                         build_dir = failed_pkg.build_dir
10747                         log_file = None
10748
10749                         log_paths = [failed_pkg.build_log]
10750
10751                         log_path = self._locate_failure_log(failed_pkg)
10752                         if log_path is not None:
10753                                 try:
10754                                         log_file = open(log_path)
10755                                 except IOError:
10756                                         pass
10757
10758                         if log_file is not None:
10759                                 try:
10760                                         for line in log_file:
10761                                                 writemsg_level(line, noiselevel=-1)
10762                                 finally:
10763                                         log_file.close()
10764                                 failure_log_shown = True
10765
10766                 # Dump mod_echo output now since it tends to flood the terminal.
10767                 # This allows us to avoid having more important output, generated
10768                 # later, from being swept away by the mod_echo output.
10769                 mod_echo_output =  _flush_elog_mod_echo()
10770
10771                 if background and not failure_log_shown and \
10772                         self._failed_pkgs_all and \
10773                         self._failed_pkgs_die_msgs and \
10774                         not mod_echo_output:
10775
10776                         printer = portage.output.EOutput()
10777                         for mysettings, key, logentries in self._failed_pkgs_die_msgs:
10778                                 root_msg = ""
10779                                 if mysettings["ROOT"] != "/":
10780                                         root_msg = " merged to %s" % mysettings["ROOT"]
10781                                 print
10782                                 printer.einfo("Error messages for package %s%s:" % \
10783                                         (colorize("INFORM", key), root_msg))
10784                                 print
10785                                 for phase in portage.const.EBUILD_PHASES:
10786                                         if phase not in logentries:
10787                                                 continue
10788                                         for msgtype, msgcontent in logentries[phase]:
10789                                                 if isinstance(msgcontent, basestring):
10790                                                         msgcontent = [msgcontent]
10791                                                 for line in msgcontent:
10792                                                         printer.eerror(line.strip("\n"))
10793
10794                 if self._post_mod_echo_msgs:
10795                         for msg in self._post_mod_echo_msgs:
10796                                 msg()
10797
10798                 if len(self._failed_pkgs_all) > 1 or \
10799                         (self._failed_pkgs_all and "--keep-going" in self.myopts):
10800                         if len(self._failed_pkgs_all) > 1:
10801                                 msg = "The following %d packages have " % \
10802                                         len(self._failed_pkgs_all) + \
10803                                         "failed to build or install:"
10804                         else:
10805                                 msg = "The following package has " + \
10806                                         "failed to build or install:"
10807                         prefix = bad(" * ")
10808                         writemsg(prefix + "\n", noiselevel=-1)
10809                         from textwrap import wrap
10810                         for line in wrap(msg, 72):
10811                                 writemsg("%s%s\n" % (prefix, line), noiselevel=-1)
10812                         writemsg(prefix + "\n", noiselevel=-1)
10813                         for failed_pkg in self._failed_pkgs_all:
10814                                 writemsg("%s\t%s\n" % (prefix,
10815                                         colorize("INFORM", str(failed_pkg.pkg))),
10816                                         noiselevel=-1)
10817                         writemsg(prefix + "\n", noiselevel=-1)
10818
10819                 return rval
10820
10821         def _elog_listener(self, mysettings, key, logentries, fulltext):
10822                 errors = portage.elog.filter_loglevels(logentries, ["ERROR"])
10823                 if errors:
10824                         self._failed_pkgs_die_msgs.append(
10825                                 (mysettings, key, errors))
10826
10827         def _locate_failure_log(self, failed_pkg):
10828
10829                 build_dir = failed_pkg.build_dir
10830                 log_file = None
10831
10832                 log_paths = [failed_pkg.build_log]
10833
10834                 for log_path in log_paths:
10835                         if not log_path:
10836                                 continue
10837
10838                         try:
10839                                 log_size = os.stat(log_path).st_size
10840                         except OSError:
10841                                 continue
10842
10843                         if log_size == 0:
10844                                 continue
10845
10846                         return log_path
10847
10848                 return None
10849
10850         def _add_packages(self):
10851                 pkg_queue = self._pkg_queue
10852                 for pkg in self._mergelist:
10853                         if isinstance(pkg, Package):
10854                                 pkg_queue.append(pkg)
10855                         elif isinstance(pkg, Blocker):
10856                                 pass
10857
10858         def _system_merge_started(self, merge):
10859                 """
10860                 Add any unsatisfied runtime deps to self._unsatisfied_system_deps.
10861                 """
10862                 graph = self._digraph
10863                 if graph is None:
10864                         return
10865                 pkg = merge.merge.pkg
10866
10867                 # Skip this if $ROOT != / since it shouldn't matter if there
10868                 # are unsatisfied system runtime deps in this case.
10869                 if pkg.root != '/':
10870                         return
10871
10872                 completed_tasks = self._completed_tasks
10873                 unsatisfied = self._unsatisfied_system_deps
10874
10875                 def ignore_non_runtime_or_satisfied(priority):
10876                         """
10877                         Ignore non-runtime and satisfied runtime priorities.
10878                         """
10879                         if isinstance(priority, DepPriority) and \
10880                                 not priority.satisfied and \
10881                                 (priority.runtime or priority.runtime_post):
10882                                 return False
10883                         return True
10884
10885                 # When checking for unsatisfied runtime deps, only check
10886                 # direct deps since indirect deps are checked when the
10887                 # corresponding parent is merged.
10888                 for child in graph.child_nodes(pkg,
10889                         ignore_priority=ignore_non_runtime_or_satisfied):
10890                         if not isinstance(child, Package) or \
10891                                 child.operation == 'uninstall':
10892                                 continue
10893                         if child is pkg:
10894                                 continue
10895                         if child.operation == 'merge' and \
10896                                 child not in completed_tasks:
10897                                 unsatisfied.add(child)
10898
10899         def _merge_wait_exit_handler(self, task):
10900                 self._merge_wait_scheduled.remove(task)
10901                 self._merge_exit(task)
10902
10903         def _merge_exit(self, merge):
10904                 self._do_merge_exit(merge)
10905                 self._deallocate_config(merge.merge.settings)
10906                 if merge.returncode == os.EX_OK and \
10907                         not merge.merge.pkg.installed:
10908                         self._status_display.curval += 1
10909                 self._status_display.merges = len(self._task_queues.merge)
10910                 self._schedule()
10911
10912         def _do_merge_exit(self, merge):
10913                 pkg = merge.merge.pkg
10914                 if merge.returncode != os.EX_OK:
10915                         settings = merge.merge.settings
10916                         build_dir = settings.get("PORTAGE_BUILDDIR")
10917                         build_log = settings.get("PORTAGE_LOG_FILE")
10918
10919                         self._failed_pkgs.append(self._failed_pkg(
10920                                 build_dir=build_dir, build_log=build_log,
10921                                 pkg=pkg,
10922                                 returncode=merge.returncode))
10923                         self._failed_pkg_msg(self._failed_pkgs[-1], "install", "to")
10924
10925                         self._status_display.failed = len(self._failed_pkgs)
10926                         return
10927
10928                 self._task_complete(pkg)
10929                 pkg_to_replace = merge.merge.pkg_to_replace
10930                 if pkg_to_replace is not None:
10931                         # When a package is replaced, mark it's uninstall
10932                         # task complete (if any).
10933                         uninst_hash_key = \
10934                                 ("installed", pkg.root, pkg_to_replace.cpv, "uninstall")
10935                         self._task_complete(uninst_hash_key)
10936
10937                 if pkg.installed:
10938                         return
10939
10940                 self._restart_if_necessary(pkg)
10941
10942                 # Call mtimedb.commit() after each merge so that
10943                 # --resume still works after being interrupted
10944                 # by reboot, sigkill or similar.
10945                 mtimedb = self._mtimedb
10946                 mtimedb["resume"]["mergelist"].remove(list(pkg))
10947                 if not mtimedb["resume"]["mergelist"]:
10948                         del mtimedb["resume"]
10949                 mtimedb.commit()
10950
10951         def _build_exit(self, build):
10952                 if build.returncode == os.EX_OK:
10953                         self.curval += 1
10954                         merge = PackageMerge(merge=build)
10955                         if not build.build_opts.buildpkgonly and \
10956                                 build.pkg in self._deep_system_deps:
10957                                 # Since dependencies on system packages are frequently
10958                                 # unspecified, merge them only when no builds are executing.
10959                                 self._merge_wait_queue.append(merge)
10960                                 merge.addStartListener(self._system_merge_started)
10961                         else:
10962                                 merge.addExitListener(self._merge_exit)
10963                                 self._task_queues.merge.add(merge)
10964                                 self._status_display.merges = len(self._task_queues.merge)
10965                 else:
10966                         settings = build.settings
10967                         build_dir = settings.get("PORTAGE_BUILDDIR")
10968                         build_log = settings.get("PORTAGE_LOG_FILE")
10969
10970                         self._failed_pkgs.append(self._failed_pkg(
10971                                 build_dir=build_dir, build_log=build_log,
10972                                 pkg=build.pkg,
10973                                 returncode=build.returncode))
10974                         self._failed_pkg_msg(self._failed_pkgs[-1], "emerge", "for")
10975
10976                         self._status_display.failed = len(self._failed_pkgs)
10977                         self._deallocate_config(build.settings)
10978                 self._jobs -= 1
10979                 self._status_display.running = self._jobs
10980                 self._schedule()
10981
10982         def _extract_exit(self, build):
10983                 self._build_exit(build)
10984
10985         def _task_complete(self, pkg):
10986                 self._completed_tasks.add(pkg)
10987                 self._unsatisfied_system_deps.discard(pkg)
10988                 self._choose_pkg_return_early = False
10989
10990         def _merge(self):
10991
10992                 self._add_prefetchers()
10993                 self._add_packages()
10994                 pkg_queue = self._pkg_queue
10995                 failed_pkgs = self._failed_pkgs
10996                 portage.locks._quiet = self._background
10997                 portage.elog._emerge_elog_listener = self._elog_listener
10998                 rval = os.EX_OK
10999
11000                 try:
11001                         self._main_loop()
11002                 finally:
11003                         self._main_loop_cleanup()
11004                         portage.locks._quiet = False
11005                         portage.elog._emerge_elog_listener = None
11006                         if failed_pkgs:
11007                                 rval = failed_pkgs[-1].returncode
11008
11009                 return rval
11010
11011         def _main_loop_cleanup(self):
11012                 del self._pkg_queue[:]
11013                 self._completed_tasks.clear()
11014                 self._deep_system_deps.clear()
11015                 self._unsatisfied_system_deps.clear()
11016                 self._choose_pkg_return_early = False
11017                 self._status_display.reset()
11018                 self._digraph = None
11019                 self._task_queues.fetch.clear()
11020
11021         def _choose_pkg(self):
11022                 """
11023                 Choose a task that has all it's dependencies satisfied.
11024                 """
11025
11026                 if self._choose_pkg_return_early:
11027                         return None
11028
11029                 if self._digraph is None:
11030                         if (self._jobs or self._task_queues.merge) and \
11031                                 not ("--nodeps" in self.myopts and \
11032                                 (self._max_jobs is True or self._max_jobs > 1)):
11033                                 self._choose_pkg_return_early = True
11034                                 return None
11035                         return self._pkg_queue.pop(0)
11036
11037                 if not (self._jobs or self._task_queues.merge):
11038                         return self._pkg_queue.pop(0)
11039
11040                 self._prune_digraph()
11041
11042                 chosen_pkg = None
11043                 later = set(self._pkg_queue)
11044                 for pkg in self._pkg_queue:
11045                         later.remove(pkg)
11046                         if not self._dependent_on_scheduled_merges(pkg, later):
11047                                 chosen_pkg = pkg
11048                                 break
11049
11050                 if chosen_pkg is not None:
11051                         self._pkg_queue.remove(chosen_pkg)
11052
11053                 if chosen_pkg is None:
11054                         # There's no point in searching for a package to
11055                         # choose until at least one of the existing jobs
11056                         # completes.
11057                         self._choose_pkg_return_early = True
11058
11059                 return chosen_pkg
11060
11061         def _dependent_on_scheduled_merges(self, pkg, later):
11062                 """
11063                 Traverse the subgraph of the given packages deep dependencies
11064                 to see if it contains any scheduled merges.
11065                 @param pkg: a package to check dependencies for
11066                 @type pkg: Package
11067                 @param later: packages for which dependence should be ignored
11068                         since they will be merged later than pkg anyway and therefore
11069                         delaying the merge of pkg will not result in a more optimal
11070                         merge order
11071                 @type later: set
11072                 @rtype: bool
11073                 @returns: True if the package is dependent, False otherwise.
11074                 """
11075
11076                 graph = self._digraph
11077                 completed_tasks = self._completed_tasks
11078
11079                 dependent = False
11080                 traversed_nodes = set([pkg])
11081                 direct_deps = graph.child_nodes(pkg)
11082                 node_stack = direct_deps
11083                 direct_deps = frozenset(direct_deps)
11084                 while node_stack:
11085                         node = node_stack.pop()
11086                         if node in traversed_nodes:
11087                                 continue
11088                         traversed_nodes.add(node)
11089                         if not ((node.installed and node.operation == "nomerge") or \
11090                                 (node.operation == "uninstall" and \
11091                                 node not in direct_deps) or \
11092                                 node in completed_tasks or \
11093                                 node in later):
11094                                 dependent = True
11095                                 break
11096                         node_stack.extend(graph.child_nodes(node))
11097
11098                 return dependent
11099
11100         def _allocate_config(self, root):
11101                 """
11102                 Allocate a unique config instance for a task in order
11103                 to prevent interference between parallel tasks.
11104                 """
11105                 if self._config_pool[root]:
11106                         temp_settings = self._config_pool[root].pop()
11107                 else:
11108                         temp_settings = portage.config(clone=self.pkgsettings[root])
11109                 # Since config.setcpv() isn't guaranteed to call config.reset() due to
11110                 # performance reasons, call it here to make sure all settings from the
11111                 # previous package get flushed out (such as PORTAGE_LOG_FILE).
11112                 temp_settings.reload()
11113                 temp_settings.reset()
11114                 return temp_settings
11115
11116         def _deallocate_config(self, settings):
11117                 self._config_pool[settings["ROOT"]].append(settings)
11118
11119         def _main_loop(self):
11120
11121                 # Only allow 1 job max if a restart is scheduled
11122                 # due to portage update.
11123                 if self._is_restart_scheduled() or \
11124                         self._opts_no_background.intersection(self.myopts):
11125                         self._set_max_jobs(1)
11126
11127                 merge_queue = self._task_queues.merge
11128
11129                 while self._schedule():
11130                         if self._poll_event_handlers:
11131                                 self._poll_loop()
11132
11133                 while True:
11134                         self._schedule()
11135                         if not (self._jobs or merge_queue):
11136                                 break
11137                         if self._poll_event_handlers:
11138                                 self._poll_loop()
11139
11140         def _keep_scheduling(self):
11141                 return bool(self._pkg_queue and \
11142                         not (self._failed_pkgs and not self._build_opts.fetchonly))
11143
11144         def _schedule_tasks(self):
11145
11146                 # When the number of jobs drops to zero, process all waiting merges.
11147                 if not self._jobs and self._merge_wait_queue:
11148                         for task in self._merge_wait_queue:
11149                                 task.addExitListener(self._merge_wait_exit_handler)
11150                                 self._task_queues.merge.add(task)
11151                         self._status_display.merges = len(self._task_queues.merge)
11152                         self._merge_wait_scheduled.extend(self._merge_wait_queue)
11153                         del self._merge_wait_queue[:]
11154
11155                 self._schedule_tasks_imp()
11156                 self._status_display.display()
11157
11158                 state_change = 0
11159                 for q in self._task_queues.values():
11160                         if q.schedule():
11161                                 state_change += 1
11162
11163                 # Cancel prefetchers if they're the only reason
11164                 # the main poll loop is still running.
11165                 if self._failed_pkgs and not self._build_opts.fetchonly and \
11166                         not (self._jobs or self._task_queues.merge) and \
11167                         self._task_queues.fetch:
11168                         self._task_queues.fetch.clear()
11169                         state_change += 1
11170
11171                 if state_change:
11172                         self._schedule_tasks_imp()
11173                         self._status_display.display()
11174
11175                 return self._keep_scheduling()
11176
11177         def _job_delay(self):
11178                 """
11179                 @rtype: bool
11180                 @returns: True if job scheduling should be delayed, False otherwise.
11181                 """
11182
11183                 if self._jobs and self._max_load is not None:
11184
11185                         current_time = time.time()
11186
11187                         delay = self._job_delay_factor * self._jobs ** self._job_delay_exp
11188                         if delay > self._job_delay_max:
11189                                 delay = self._job_delay_max
11190                         if (current_time - self._previous_job_start_time) < delay:
11191                                 return True
11192
11193                 return False
11194
11195         def _schedule_tasks_imp(self):
11196                 """
11197                 @rtype: bool
11198                 @returns: True if state changed, False otherwise.
11199                 """
11200
11201                 state_change = 0
11202
11203                 while True:
11204
11205                         if not self._keep_scheduling():
11206                                 return bool(state_change)
11207
11208                         if self._choose_pkg_return_early or \
11209                                 self._merge_wait_scheduled or \
11210                                 (self._jobs and self._unsatisfied_system_deps) or \
11211                                 not self._can_add_job() or \
11212                                 self._job_delay():
11213                                 return bool(state_change)
11214
11215                         pkg = self._choose_pkg()
11216                         if pkg is None:
11217                                 return bool(state_change)
11218
11219                         state_change += 1
11220
11221                         if not pkg.installed:
11222                                 self._pkg_count.curval += 1
11223
11224                         task = self._task(pkg)
11225
11226                         if pkg.installed:
11227                                 merge = PackageMerge(merge=task)
11228                                 merge.addExitListener(self._merge_exit)
11229                                 self._task_queues.merge.add(merge)
11230
11231                         elif pkg.built:
11232                                 self._jobs += 1
11233                                 self._previous_job_start_time = time.time()
11234                                 self._status_display.running = self._jobs
11235                                 task.addExitListener(self._extract_exit)
11236                                 self._task_queues.jobs.add(task)
11237
11238                         else:
11239                                 self._jobs += 1
11240                                 self._previous_job_start_time = time.time()
11241                                 self._status_display.running = self._jobs
11242                                 task.addExitListener(self._build_exit)
11243                                 self._task_queues.jobs.add(task)
11244
11245                 return bool(state_change)
11246
11247         def _task(self, pkg):
11248
11249                 pkg_to_replace = None
11250                 if pkg.operation != "uninstall":
11251                         vardb = pkg.root_config.trees["vartree"].dbapi
11252                         previous_cpv = vardb.match(pkg.slot_atom)
11253                         if previous_cpv:
11254                                 previous_cpv = previous_cpv.pop()
11255                                 pkg_to_replace = self._pkg(previous_cpv,
11256                                         "installed", pkg.root_config, installed=True)
11257
11258                 task = MergeListItem(args_set=self._args_set,
11259                         background=self._background, binpkg_opts=self._binpkg_opts,
11260                         build_opts=self._build_opts,
11261                         config_pool=self._ConfigPool(pkg.root,
11262                         self._allocate_config, self._deallocate_config),
11263                         emerge_opts=self.myopts,
11264                         find_blockers=self._find_blockers(pkg), logger=self._logger,
11265                         mtimedb=self._mtimedb, pkg=pkg, pkg_count=self._pkg_count.copy(),
11266                         pkg_to_replace=pkg_to_replace,
11267                         prefetcher=self._prefetchers.get(pkg),
11268                         scheduler=self._sched_iface,
11269                         settings=self._allocate_config(pkg.root),
11270                         statusMessage=self._status_msg,
11271                         world_atom=self._world_atom)
11272
11273                 return task
11274
11275         def _failed_pkg_msg(self, failed_pkg, action, preposition):
11276                 pkg = failed_pkg.pkg
11277                 msg = "%s to %s %s" % \
11278                         (bad("Failed"), action, colorize("INFORM", pkg.cpv))
11279                 if pkg.root != "/":
11280                         msg += " %s %s" % (preposition, pkg.root)
11281
11282                 log_path = self._locate_failure_log(failed_pkg)
11283                 if log_path is not None:
11284                         msg += ", Log file:"
11285                 self._status_msg(msg)
11286
11287                 if log_path is not None:
11288                         self._status_msg(" '%s'" % (colorize("INFORM", log_path),))
11289
11290         def _status_msg(self, msg):
11291                 """
11292                 Display a brief status message (no newlines) in the status display.
11293                 This is called by tasks to provide feedback to the user. This
11294                 delegates the resposibility of generating \r and \n control characters,
11295                 to guarantee that lines are created or erased when necessary and
11296                 appropriate.
11297
11298                 @type msg: str
11299                 @param msg: a brief status message (no newlines allowed)
11300                 """
11301                 if not self._background:
11302                         writemsg_level("\n")
11303                 self._status_display.displayMessage(msg)
11304
11305         def _save_resume_list(self):
11306                 """
11307                 Do this before verifying the ebuild Manifests since it might
11308                 be possible for the user to use --resume --skipfirst get past
11309                 a non-essential package with a broken digest.
11310                 """
11311                 mtimedb = self._mtimedb
11312                 mtimedb["resume"]["mergelist"] = [list(x) \
11313                         for x in self._mergelist \
11314                         if isinstance(x, Package) and x.operation == "merge"]
11315
11316                 mtimedb.commit()
11317
11318         def _calc_resume_list(self):
11319                 """
11320                 Use the current resume list to calculate a new one,
11321                 dropping any packages with unsatisfied deps.
11322                 @rtype: bool
11323                 @returns: True if successful, False otherwise.
11324                 """
11325                 print colorize("GOOD", "*** Resuming merge...")
11326
11327                 if self._show_list():
11328                         if "--tree" in self.myopts:
11329                                 portage.writemsg_stdout("\n" + \
11330                                         darkgreen("These are the packages that " + \
11331                                         "would be merged, in reverse order:\n\n"))
11332
11333                         else:
11334                                 portage.writemsg_stdout("\n" + \
11335                                         darkgreen("These are the packages that " + \
11336                                         "would be merged, in order:\n\n"))
11337
11338                 show_spinner = "--quiet" not in self.myopts and \
11339                         "--nodeps" not in self.myopts
11340
11341                 if show_spinner:
11342                         print "Calculating dependencies  ",
11343
11344                 myparams = create_depgraph_params(self.myopts, None)
11345                 success = False
11346                 e = None
11347                 try:
11348                         success, mydepgraph, dropped_tasks = resume_depgraph(
11349                                 self.settings, self.trees, self._mtimedb, self.myopts,
11350                                 myparams, self._spinner)
11351                 except depgraph.UnsatisfiedResumeDep, exc:
11352                         # rename variable to avoid python-3.0 error:
11353                         # SyntaxError: can not delete variable 'e' referenced in nested
11354                         #              scope
11355                         e = exc
11356                         mydepgraph = e.depgraph
11357                         dropped_tasks = set()
11358
11359                 if show_spinner:
11360                         print "\b\b... done!"
11361
11362                 if e is not None:
11363                         def unsatisfied_resume_dep_msg():
11364                                 mydepgraph.display_problems()
11365                                 out = portage.output.EOutput()
11366                                 out.eerror("One or more packages are either masked or " + \
11367                                         "have missing dependencies:")
11368                                 out.eerror("")
11369                                 indent = "  "
11370                                 show_parents = set()
11371                                 for dep in e.value:
11372                                         if dep.parent in show_parents:
11373                                                 continue
11374                                         show_parents.add(dep.parent)
11375                                         if dep.atom is None:
11376                                                 out.eerror(indent + "Masked package:")
11377                                                 out.eerror(2 * indent + str(dep.parent))
11378                                                 out.eerror("")
11379                                         else:
11380                                                 out.eerror(indent + str(dep.atom) + " pulled in by:")
11381                                                 out.eerror(2 * indent + str(dep.parent))
11382                                                 out.eerror("")
11383                                 msg = "The resume list contains packages " + \
11384                                         "that are either masked or have " + \
11385                                         "unsatisfied dependencies. " + \
11386                                         "Please restart/continue " + \
11387                                         "the operation manually, or use --skipfirst " + \
11388                                         "to skip the first package in the list and " + \
11389                                         "any other packages that may be " + \
11390                                         "masked or have missing dependencies."
11391                                 for line in textwrap.wrap(msg, 72):
11392                                         out.eerror(line)
11393                         self._post_mod_echo_msgs.append(unsatisfied_resume_dep_msg)
11394                         return False
11395
11396                 if success and self._show_list():
11397                         mylist = mydepgraph.altlist()
11398                         if mylist:
11399                                 if "--tree" in self.myopts:
11400                                         mylist.reverse()
11401                                 mydepgraph.display(mylist, favorites=self._favorites)
11402
11403                 if not success:
11404                         self._post_mod_echo_msgs.append(mydepgraph.display_problems)
11405                         return False
11406                 mydepgraph.display_problems()
11407
11408                 mylist = mydepgraph.altlist()
11409                 mydepgraph.break_refs(mylist)
11410                 mydepgraph.break_refs(dropped_tasks)
11411                 self._mergelist = mylist
11412                 self._set_digraph(mydepgraph.schedulerGraph())
11413
11414                 msg_width = 75
11415                 for task in dropped_tasks:
11416                         if not (isinstance(task, Package) and task.operation == "merge"):
11417                                 continue
11418                         pkg = task
11419                         msg = "emerge --keep-going:" + \
11420                                 " %s" % (pkg.cpv,)
11421                         if pkg.root != "/":
11422                                 msg += " for %s" % (pkg.root,)
11423                         msg += " dropped due to unsatisfied dependency."
11424                         for line in textwrap.wrap(msg, msg_width):
11425                                 eerror(line, phase="other", key=pkg.cpv)
11426                         settings = self.pkgsettings[pkg.root]
11427                         # Ensure that log collection from $T is disabled inside
11428                         # elog_process(), since any logs that might exist are
11429                         # not valid here.
11430                         settings.pop("T", None)
11431                         portage.elog.elog_process(pkg.cpv, settings)
11432                         self._failed_pkgs_all.append(self._failed_pkg(pkg=pkg))
11433
11434                 return True
11435
11436         def _show_list(self):
11437                 myopts = self.myopts
11438                 if "--quiet" not in myopts and \
11439                         ("--ask" in myopts or "--tree" in myopts or \
11440                         "--verbose" in myopts):
11441                         return True
11442                 return False
11443
11444         def _world_atom(self, pkg):
11445                 """
11446                 Add the package to the world file, but only if
11447                 it's supposed to be added. Otherwise, do nothing.
11448                 """
11449
11450                 if set(("--buildpkgonly", "--fetchonly",
11451                         "--fetch-all-uri",
11452                         "--oneshot", "--onlydeps",
11453                         "--pretend")).intersection(self.myopts):
11454                         return
11455
11456                 if pkg.root != self.target_root:
11457                         return
11458
11459                 args_set = self._args_set
11460                 if not args_set.findAtomForPackage(pkg):
11461                         return
11462
11463                 logger = self._logger
11464                 pkg_count = self._pkg_count
11465                 root_config = pkg.root_config
11466                 world_set = root_config.sets["world"]
11467                 world_locked = False
11468                 if hasattr(world_set, "lock"):
11469                         world_set.lock()
11470                         world_locked = True
11471
11472                 try:
11473                         if hasattr(world_set, "load"):
11474                                 world_set.load() # maybe it's changed on disk
11475
11476                         atom = create_world_atom(pkg, args_set, root_config)
11477                         if atom:
11478                                 if hasattr(world_set, "add"):
11479                                         self._status_msg(('Recording %s in "world" ' + \
11480                                                 'favorites file...') % atom)
11481                                         logger.log(" === (%s of %s) Updating world file (%s)" % \
11482                                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv))
11483                                         world_set.add(atom)
11484                                 else:
11485                                         writemsg_level('\n!!! Unable to record %s in "world"\n' % \
11486                                                 (atom,), level=logging.WARN, noiselevel=-1)
11487                 finally:
11488                         if world_locked:
11489                                 world_set.unlock()
11490
11491         def _pkg(self, cpv, type_name, root_config, installed=False):
11492                 """
11493                 Get a package instance from the cache, or create a new
11494                 one if necessary. Raises KeyError from aux_get if it
11495                 failures for some reason (package does not exist or is
11496                 corrupt).
11497                 """
11498                 operation = "merge"
11499                 if installed:
11500                         operation = "nomerge"
11501
11502                 if self._digraph is not None:
11503                         # Reuse existing instance when available.
11504                         pkg = self._digraph.get(
11505                                 (type_name, root_config.root, cpv, operation))
11506                         if pkg is not None:
11507                                 return pkg
11508
11509                 tree_type = depgraph.pkg_tree_map[type_name]
11510                 db = root_config.trees[tree_type].dbapi
11511                 db_keys = list(self.trees[root_config.root][
11512                         tree_type].dbapi._aux_cache_keys)
11513                 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
11514                 pkg = Package(cpv=cpv, metadata=metadata,
11515                         root_config=root_config, installed=installed)
11516                 if type_name == "ebuild":
11517                         settings = self.pkgsettings[root_config.root]
11518                         settings.setcpv(pkg)
11519                         pkg.metadata["USE"] = settings["PORTAGE_USE"]
11520                         pkg.metadata['CHOST'] = settings.get('CHOST', '')
11521
11522                 return pkg
11523
11524 class MetadataRegen(PollScheduler):
11525
11526         def __init__(self, portdb, max_jobs=None, max_load=None):
11527                 PollScheduler.__init__(self)
11528                 self._portdb = portdb
11529
11530                 if max_jobs is None:
11531                         max_jobs = 1
11532
11533                 self._max_jobs = max_jobs
11534                 self._max_load = max_load
11535                 self._sched_iface = self._sched_iface_class(
11536                         register=self._register,
11537                         schedule=self._schedule_wait,
11538                         unregister=self._unregister)
11539
11540                 self._valid_pkgs = set()
11541                 self._process_iter = self._iter_metadata_processes()
11542                 self.returncode = os.EX_OK
11543                 self._error_count = 0
11544
11545         def _iter_metadata_processes(self):
11546                 portdb = self._portdb
11547                 valid_pkgs = self._valid_pkgs
11548                 every_cp = portdb.cp_all()
11549                 every_cp.sort(reverse=True)
11550
11551                 while every_cp:
11552                         cp = every_cp.pop()
11553                         portage.writemsg_stdout("Processing %s\n" % cp)
11554                         cpv_list = portdb.cp_list(cp)
11555                         for cpv in cpv_list:
11556                                 valid_pkgs.add(cpv)
11557                                 ebuild_path, repo_path = portdb.findname2(cpv)
11558                                 metadata_process = portdb._metadata_process(
11559                                         cpv, ebuild_path, repo_path)
11560                                 if metadata_process is None:
11561                                         continue
11562                                 yield metadata_process
11563
11564         def run(self):
11565
11566                 portdb = self._portdb
11567                 from portage.cache.cache_errors import CacheError
11568                 dead_nodes = {}
11569
11570                 for mytree in portdb.porttrees:
11571                         try:
11572                                 dead_nodes[mytree] = set(portdb.auxdb[mytree].iterkeys())
11573                         except CacheError, e:
11574                                 portage.writemsg("Error listing cache entries for " + \
11575                                         "'%s': %s, continuing...\n" % (mytree, e), noiselevel=-1)
11576                                 del e
11577                                 dead_nodes = None
11578                                 break
11579
11580                 while self._schedule():
11581                         self._poll_loop()
11582
11583                 while self._jobs:
11584                         self._poll_loop()
11585
11586                 if dead_nodes:
11587                         for y in self._valid_pkgs:
11588                                 for mytree in portdb.porttrees:
11589                                         if portdb.findname2(y, mytree=mytree)[0]:
11590                                                 dead_nodes[mytree].discard(y)
11591
11592                         for mytree, nodes in dead_nodes.iteritems():
11593                                 auxdb = portdb.auxdb[mytree]
11594                                 for y in nodes:
11595                                         try:
11596                                                 del auxdb[y]
11597                                         except (KeyError, CacheError):
11598                                                 pass
11599
11600         def _schedule_tasks(self):
11601                 """
11602                 @rtype: bool
11603                 @returns: True if there may be remaining tasks to schedule,
11604                         False otherwise.
11605                 """
11606                 while self._can_add_job():
11607                         try:
11608                                 metadata_process = self._process_iter.next()
11609                         except StopIteration:
11610                                 return False
11611
11612                         self._jobs += 1
11613                         metadata_process.scheduler = self._sched_iface
11614                         metadata_process.addExitListener(self._metadata_exit)
11615                         metadata_process.start()
11616                 return True
11617
11618         def _metadata_exit(self, metadata_process):
11619                 self._jobs -= 1
11620                 if metadata_process.returncode != os.EX_OK:
11621                         self.returncode = 1
11622                         self._error_count += 1
11623                         self._valid_pkgs.discard(metadata_process.cpv)
11624                         portage.writemsg("Error processing %s, continuing...\n" % \
11625                                 (metadata_process.cpv,))
11626                 self._schedule()
11627
11628 class UninstallFailure(portage.exception.PortageException):
11629         """
11630         An instance of this class is raised by unmerge() when
11631         an uninstallation fails.
11632         """
11633         status = 1
11634         def __init__(self, *pargs):
11635                 portage.exception.PortageException.__init__(self, pargs)
11636                 if pargs:
11637                         self.status = pargs[0]
11638
11639 def unmerge(root_config, myopts, unmerge_action,
11640         unmerge_files, ldpath_mtimes, autoclean=0,
11641         clean_world=1, clean_delay=1, ordered=0, raise_on_error=0,
11642         scheduler=None, writemsg_level=portage.util.writemsg_level):
11643
11644         quiet = "--quiet" in myopts
11645         settings = root_config.settings
11646         sets = root_config.sets
11647         vartree = root_config.trees["vartree"]
11648         candidate_catpkgs=[]
11649         global_unmerge=0
11650         xterm_titles = "notitles" not in settings.features
11651         out = portage.output.EOutput()
11652         pkg_cache = {}
11653         db_keys = list(vartree.dbapi._aux_cache_keys)
11654
11655         def _pkg(cpv):
11656                 pkg = pkg_cache.get(cpv)
11657                 if pkg is None:
11658                         pkg = Package(cpv=cpv, installed=True,
11659                                 metadata=izip(db_keys, vartree.dbapi.aux_get(cpv, db_keys)),
11660                                 root_config=root_config,
11661                                 type_name="installed")
11662                         pkg_cache[cpv] = pkg
11663                 return pkg
11664
11665         vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
11666         try:
11667                 # At least the parent needs to exist for the lock file.
11668                 portage.util.ensure_dirs(vdb_path)
11669         except portage.exception.PortageException:
11670                 pass
11671         vdb_lock = None
11672         try:
11673                 if os.access(vdb_path, os.W_OK):
11674                         vdb_lock = portage.locks.lockdir(vdb_path)
11675                 realsyslist = sets["system"].getAtoms()
11676                 syslist = []
11677                 for x in realsyslist:
11678                         mycp = portage.dep_getkey(x)
11679                         if mycp in settings.getvirtuals():
11680                                 providers = []
11681                                 for provider in settings.getvirtuals()[mycp]:
11682                                         if vartree.dbapi.match(provider):
11683                                                 providers.append(provider)
11684                                 if len(providers) == 1:
11685                                         syslist.extend(providers)
11686                         else:
11687                                 syslist.append(mycp)
11688         
11689                 mysettings = portage.config(clone=settings)
11690         
11691                 if not unmerge_files:
11692                         if unmerge_action == "unmerge":
11693                                 print
11694                                 print bold("emerge unmerge") + " can only be used with specific package names"
11695                                 print
11696                                 return 0
11697                         else:
11698                                 global_unmerge = 1
11699         
11700                 localtree = vartree
11701                 # process all arguments and add all
11702                 # valid db entries to candidate_catpkgs
11703                 if global_unmerge:
11704                         if not unmerge_files:
11705                                 candidate_catpkgs.extend(vartree.dbapi.cp_all())
11706                 else:
11707                         #we've got command-line arguments
11708                         if not unmerge_files:
11709                                 print "\nNo packages to unmerge have been provided.\n"
11710                                 return 0
11711                         for x in unmerge_files:
11712                                 arg_parts = x.split('/')
11713                                 if x[0] not in [".","/"] and \
11714                                         arg_parts[-1][-7:] != ".ebuild":
11715                                         #possible cat/pkg or dep; treat as such
11716                                         candidate_catpkgs.append(x)
11717                                 elif unmerge_action in ["prune","clean"]:
11718                                         print "\n!!! Prune and clean do not accept individual" + \
11719                                                 " ebuilds as arguments;\n    skipping.\n"
11720                                         continue
11721                                 else:
11722                                         # it appears that the user is specifying an installed
11723                                         # ebuild and we're in "unmerge" mode, so it's ok.
11724                                         if not os.path.exists(x):
11725                                                 print "\n!!! The path '"+x+"' doesn't exist.\n"
11726                                                 return 0
11727         
11728                                         absx   = os.path.abspath(x)
11729                                         sp_absx = absx.split("/")
11730                                         if sp_absx[-1][-7:] == ".ebuild":
11731                                                 del sp_absx[-1]
11732                                                 absx = "/".join(sp_absx)
11733         
11734                                         sp_absx_len = len(sp_absx)
11735         
11736                                         vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
11737                                         vdb_len  = len(vdb_path)
11738         
11739                                         sp_vdb     = vdb_path.split("/")
11740                                         sp_vdb_len = len(sp_vdb)
11741         
11742                                         if not os.path.exists(absx+"/CONTENTS"):
11743                                                 print "!!! Not a valid db dir: "+str(absx)
11744                                                 return 0
11745         
11746                                         if sp_absx_len <= sp_vdb_len:
11747                                                 # The Path is shorter... so it can't be inside the vdb.
11748                                                 print sp_absx
11749                                                 print absx
11750                                                 print "\n!!!",x,"cannot be inside "+ \
11751                                                         vdb_path+"; aborting.\n"
11752                                                 return 0
11753         
11754                                         for idx in range(0,sp_vdb_len):
11755                                                 if idx >= sp_absx_len or sp_vdb[idx] != sp_absx[idx]:
11756                                                         print sp_absx
11757                                                         print absx
11758                                                         print "\n!!!", x, "is not inside "+\
11759                                                                 vdb_path+"; aborting.\n"
11760                                                         return 0
11761         
11762                                         print "="+"/".join(sp_absx[sp_vdb_len:])
11763                                         candidate_catpkgs.append(
11764                                                 "="+"/".join(sp_absx[sp_vdb_len:]))
11765         
11766                 newline=""
11767                 if (not "--quiet" in myopts):
11768                         newline="\n"
11769                 if settings["ROOT"] != "/":
11770                         writemsg_level(darkgreen(newline+ \
11771                                 ">>> Using system located in ROOT tree %s\n" % \
11772                                 settings["ROOT"]))
11773
11774                 if (("--pretend" in myopts) or ("--ask" in myopts)) and \
11775                         not ("--quiet" in myopts):
11776                         writemsg_level(darkgreen(newline+\
11777                                 ">>> These are the packages that would be unmerged:\n"))
11778
11779                 # Preservation of order is required for --depclean and --prune so
11780                 # that dependencies are respected. Use all_selected to eliminate
11781                 # duplicate packages since the same package may be selected by
11782                 # multiple atoms.
11783                 pkgmap = []
11784                 all_selected = set()
11785                 for x in candidate_catpkgs:
11786                         # cycle through all our candidate deps and determine
11787                         # what will and will not get unmerged
11788                         try:
11789                                 mymatch = vartree.dbapi.match(x)
11790                         except portage.exception.AmbiguousPackageName, errpkgs:
11791                                 print "\n\n!!! The short ebuild name \"" + \
11792                                         x + "\" is ambiguous.  Please specify"
11793                                 print "!!! one of the following fully-qualified " + \
11794                                         "ebuild names instead:\n"
11795                                 for i in errpkgs[0]:
11796                                         print "    " + green(i)
11797                                 print
11798                                 sys.exit(1)
11799         
11800                         if not mymatch and x[0] not in "<>=~":
11801                                 mymatch = localtree.dep_match(x)
11802                         if not mymatch:
11803                                 portage.writemsg("\n--- Couldn't find '%s' to %s.\n" % \
11804                                         (x, unmerge_action), noiselevel=-1)
11805                                 continue
11806
11807                         pkgmap.append(
11808                                 {"protected": set(), "selected": set(), "omitted": set()})
11809                         mykey = len(pkgmap) - 1
11810                         if unmerge_action=="unmerge":
11811                                         for y in mymatch:
11812                                                 if y not in all_selected:
11813                                                         pkgmap[mykey]["selected"].add(y)
11814                                                         all_selected.add(y)
11815                         elif unmerge_action == "prune":
11816                                 if len(mymatch) == 1:
11817                                         continue
11818                                 best_version = mymatch[0]
11819                                 best_slot = vartree.getslot(best_version)
11820                                 best_counter = vartree.dbapi.cpv_counter(best_version)
11821                                 for mypkg in mymatch[1:]:
11822                                         myslot = vartree.getslot(mypkg)
11823                                         mycounter = vartree.dbapi.cpv_counter(mypkg)
11824                                         if (myslot == best_slot and mycounter > best_counter) or \
11825                                                 mypkg == portage.best([mypkg, best_version]):
11826                                                 if myslot == best_slot:
11827                                                         if mycounter < best_counter:
11828                                                                 # On slot collision, keep the one with the
11829                                                                 # highest counter since it is the most
11830                                                                 # recently installed.
11831                                                                 continue
11832                                                 best_version = mypkg
11833                                                 best_slot = myslot
11834                                                 best_counter = mycounter
11835                                 pkgmap[mykey]["protected"].add(best_version)
11836                                 pkgmap[mykey]["selected"].update(mypkg for mypkg in mymatch \
11837                                         if mypkg != best_version and mypkg not in all_selected)
11838                                 all_selected.update(pkgmap[mykey]["selected"])
11839                         else:
11840                                 # unmerge_action == "clean"
11841                                 slotmap={}
11842                                 for mypkg in mymatch:
11843                                         if unmerge_action == "clean":
11844                                                 myslot = localtree.getslot(mypkg)
11845                                         else:
11846                                                 # since we're pruning, we don't care about slots
11847                                                 # and put all the pkgs in together
11848                                                 myslot = 0
11849                                         if myslot not in slotmap:
11850                                                 slotmap[myslot] = {}
11851                                         slotmap[myslot][localtree.dbapi.cpv_counter(mypkg)] = mypkg
11852
11853                                 for mypkg in vartree.dbapi.cp_list(
11854                                         portage.dep_getkey(mymatch[0])):
11855                                         myslot = vartree.getslot(mypkg)
11856                                         if myslot not in slotmap:
11857                                                 slotmap[myslot] = {}
11858                                         slotmap[myslot][vartree.dbapi.cpv_counter(mypkg)] = mypkg
11859
11860                                 for myslot in slotmap:
11861                                         counterkeys = slotmap[myslot].keys()
11862                                         if not counterkeys:
11863                                                 continue
11864                                         counterkeys.sort()
11865                                         pkgmap[mykey]["protected"].add(
11866                                                 slotmap[myslot][counterkeys[-1]])
11867                                         del counterkeys[-1]
11868
11869                                         for counter in counterkeys[:]:
11870                                                 mypkg = slotmap[myslot][counter]
11871                                                 if mypkg not in mymatch:
11872                                                         counterkeys.remove(counter)
11873                                                         pkgmap[mykey]["protected"].add(
11874                                                                 slotmap[myslot][counter])
11875
11876                                         #be pretty and get them in order of merge:
11877                                         for ckey in counterkeys:
11878                                                 mypkg = slotmap[myslot][ckey]
11879                                                 if mypkg not in all_selected:
11880                                                         pkgmap[mykey]["selected"].add(mypkg)
11881                                                         all_selected.add(mypkg)
11882                                         # ok, now the last-merged package
11883                                         # is protected, and the rest are selected
11884                 numselected = len(all_selected)
11885                 if global_unmerge and not numselected:
11886                         portage.writemsg_stdout("\n>>> No outdated packages were found on your system.\n")
11887                         return 0
11888         
11889                 if not numselected:
11890                         portage.writemsg_stdout(
11891                                 "\n>>> No packages selected for removal by " + \
11892                                 unmerge_action + "\n")
11893                         return 0
11894         finally:
11895                 if vdb_lock:
11896                         vartree.dbapi.flush_cache()
11897                         portage.locks.unlockdir(vdb_lock)
11898         
11899         from portage.sets.base import EditablePackageSet
11900         
11901         # generate a list of package sets that are directly or indirectly listed in "world",
11902         # as there is no persistent list of "installed" sets
11903         installed_sets = ["world"]
11904         stop = False
11905         pos = 0
11906         while not stop:
11907                 stop = True
11908                 pos = len(installed_sets)
11909                 for s in installed_sets[pos - 1:]:
11910                         if s not in sets:
11911                                 continue
11912                         candidates = [x[len(SETPREFIX):] for x in sets[s].getNonAtoms() if x.startswith(SETPREFIX)]
11913                         if candidates:
11914                                 stop = False
11915                                 installed_sets += candidates
11916         installed_sets = [x for x in installed_sets if x not in root_config.setconfig.active]
11917         del stop, pos
11918
11919         # we don't want to unmerge packages that are still listed in user-editable package sets
11920         # listed in "world" as they would be remerged on the next update of "world" or the 
11921         # relevant package sets.
11922         unknown_sets = set()
11923         for cp in xrange(len(pkgmap)):
11924                 for cpv in pkgmap[cp]["selected"].copy():
11925                         try:
11926                                 pkg = _pkg(cpv)
11927                         except KeyError:
11928                                 # It could have been uninstalled
11929                                 # by a concurrent process.
11930                                 continue
11931
11932                         if unmerge_action != "clean" and \
11933                                 root_config.root == "/" and \
11934                                 portage.match_from_list(
11935                                 portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
11936                                 msg = ("Not unmerging package %s since there is no valid " + \
11937                                 "reason for portage to unmerge itself.") % (pkg.cpv,)
11938                                 for line in textwrap.wrap(msg, 75):
11939                                         out.eerror(line)
11940                                 # adjust pkgmap so the display output is correct
11941                                 pkgmap[cp]["selected"].remove(cpv)
11942                                 all_selected.remove(cpv)
11943                                 pkgmap[cp]["protected"].add(cpv)
11944                                 continue
11945
11946                         parents = []
11947                         for s in installed_sets:
11948                                 # skip sets that the user requested to unmerge, and skip world 
11949                                 # unless we're unmerging a package set (as the package would be 
11950                                 # removed from "world" later on)
11951                                 if s in root_config.setconfig.active or (s == "world" and not root_config.setconfig.active):
11952                                         continue
11953
11954                                 if s not in sets:
11955                                         if s in unknown_sets:
11956                                                 continue
11957                                         unknown_sets.add(s)
11958                                         out = portage.output.EOutput()
11959                                         out.eerror(("Unknown set '@%s' in " + \
11960                                                 "%svar/lib/portage/world_sets") % \
11961                                                 (s, root_config.root))
11962                                         continue
11963
11964                                 # only check instances of EditablePackageSet as other classes are generally used for
11965                                 # special purposes and can be ignored here (and are usually generated dynamically, so the
11966                                 # user can't do much about them anyway)
11967                                 if isinstance(sets[s], EditablePackageSet):
11968
11969                                         # This is derived from a snippet of code in the
11970                                         # depgraph._iter_atoms_for_pkg() method.
11971                                         for atom in sets[s].iterAtomsForPackage(pkg):
11972                                                 inst_matches = vartree.dbapi.match(atom)
11973                                                 inst_matches.reverse() # descending order
11974                                                 higher_slot = None
11975                                                 for inst_cpv in inst_matches:
11976                                                         try:
11977                                                                 inst_pkg = _pkg(inst_cpv)
11978                                                         except KeyError:
11979                                                                 # It could have been uninstalled
11980                                                                 # by a concurrent process.
11981                                                                 continue
11982
11983                                                         if inst_pkg.cp != atom.cp:
11984                                                                 continue
11985                                                         if pkg >= inst_pkg:
11986                                                                 # This is descending order, and we're not
11987                                                                 # interested in any versions <= pkg given.
11988                                                                 break
11989                                                         if pkg.slot_atom != inst_pkg.slot_atom:
11990                                                                 higher_slot = inst_pkg
11991                                                                 break
11992                                                 if higher_slot is None:
11993                                                         parents.append(s)
11994                                                         break
11995                         if parents:
11996                                 #print colorize("WARN", "Package %s is going to be unmerged," % cpv)
11997                                 #print colorize("WARN", "but still listed in the following package sets:")
11998                                 #print "    %s\n" % ", ".join(parents)
11999                                 print colorize("WARN", "Not unmerging package %s as it is" % cpv)
12000                                 print colorize("WARN", "still referenced by the following package sets:")
12001                                 print "    %s\n" % ", ".join(parents)
12002                                 # adjust pkgmap so the display output is correct
12003                                 pkgmap[cp]["selected"].remove(cpv)
12004                                 all_selected.remove(cpv)
12005                                 pkgmap[cp]["protected"].add(cpv)
12006         
12007         del installed_sets
12008
12009         numselected = len(all_selected)
12010         if not numselected:
12011                 writemsg_level(
12012                         "\n>>> No packages selected for removal by " + \
12013                         unmerge_action + "\n")
12014                 return 0
12015
12016         # Unmerge order only matters in some cases
12017         if not ordered:
12018                 unordered = {}
12019                 for d in pkgmap:
12020                         selected = d["selected"]
12021                         if not selected:
12022                                 continue
12023                         cp = portage.cpv_getkey(iter(selected).next())
12024                         cp_dict = unordered.get(cp)
12025                         if cp_dict is None:
12026                                 cp_dict = {}
12027                                 unordered[cp] = cp_dict
12028                                 for k in d:
12029                                         cp_dict[k] = set()
12030                         for k, v in d.iteritems():
12031                                 cp_dict[k].update(v)
12032                 pkgmap = [unordered[cp] for cp in sorted(unordered)]
12033
12034         for x in xrange(len(pkgmap)):
12035                 selected = pkgmap[x]["selected"]
12036                 if not selected:
12037                         continue
12038                 for mytype, mylist in pkgmap[x].iteritems():
12039                         if mytype == "selected":
12040                                 continue
12041                         mylist.difference_update(all_selected)
12042                 cp = portage.cpv_getkey(iter(selected).next())
12043                 for y in localtree.dep_match(cp):
12044                         if y not in pkgmap[x]["omitted"] and \
12045                                 y not in pkgmap[x]["selected"] and \
12046                                 y not in pkgmap[x]["protected"] and \
12047                                 y not in all_selected:
12048                                 pkgmap[x]["omitted"].add(y)
12049                 if global_unmerge and not pkgmap[x]["selected"]:
12050                         #avoid cluttering the preview printout with stuff that isn't getting unmerged
12051                         continue
12052                 if not (pkgmap[x]["protected"] or pkgmap[x]["omitted"]) and cp in syslist:
12053                         writemsg_level(colorize("BAD","\a\n\n!!! " + \
12054                                 "'%s' is part of your system profile.\n" % cp),
12055                                 level=logging.WARNING, noiselevel=-1)
12056                         writemsg_level(colorize("WARN","\a!!! Unmerging it may " + \
12057                                 "be damaging to your system.\n\n"),
12058                                 level=logging.WARNING, noiselevel=-1)
12059                         if clean_delay and "--pretend" not in myopts and "--ask" not in myopts:
12060                                 countdown(int(settings["EMERGE_WARNING_DELAY"]),
12061                                         colorize("UNMERGE_WARN", "Press Ctrl-C to Stop"))
12062                 if not quiet:
12063                         writemsg_level("\n %s\n" % (bold(cp),), noiselevel=-1)
12064                 else:
12065                         writemsg_level(bold(cp) + ": ", noiselevel=-1)
12066                 for mytype in ["selected","protected","omitted"]:
12067                         if not quiet:
12068                                 writemsg_level((mytype + ": ").rjust(14), noiselevel=-1)
12069                         if pkgmap[x][mytype]:
12070                                 sorted_pkgs = [portage.catpkgsplit(mypkg)[1:] for mypkg in pkgmap[x][mytype]]
12071                                 sorted_pkgs.sort(key=cmp_sort_key(portage.pkgcmp))
12072                                 for pn, ver, rev in sorted_pkgs:
12073                                         if rev == "r0":
12074                                                 myversion = ver
12075                                         else:
12076                                                 myversion = ver + "-" + rev
12077                                         if mytype == "selected":
12078                                                 writemsg_level(
12079                                                         colorize("UNMERGE_WARN", myversion + " "),
12080                                                         noiselevel=-1)
12081                                         else:
12082                                                 writemsg_level(
12083                                                         colorize("GOOD", myversion + " "), noiselevel=-1)
12084                         else:
12085                                 writemsg_level("none ", noiselevel=-1)
12086                         if not quiet:
12087                                 writemsg_level("\n", noiselevel=-1)
12088                 if quiet:
12089                         writemsg_level("\n", noiselevel=-1)
12090
12091         writemsg_level("\n>>> " + colorize("UNMERGE_WARN", "'Selected'") + \
12092                 " packages are slated for removal.\n")
12093         writemsg_level(">>> " + colorize("GOOD", "'Protected'") + \
12094                         " and " + colorize("GOOD", "'omitted'") + \
12095                         " packages will not be removed.\n\n")
12096
12097         if "--pretend" in myopts:
12098                 #we're done... return
12099                 return 0
12100         if "--ask" in myopts:
12101                 if userquery("Would you like to unmerge these packages?")=="No":
12102                         # enter pretend mode for correct formatting of results
12103                         myopts["--pretend"] = True
12104                         print
12105                         print "Quitting."
12106                         print
12107                         return 0
12108         #the real unmerging begins, after a short delay....
12109         if clean_delay and not autoclean:
12110                 countdown(int(settings["CLEAN_DELAY"]), ">>> Unmerging")
12111
12112         for x in xrange(len(pkgmap)):
12113                 for y in pkgmap[x]["selected"]:
12114                         writemsg_level(">>> Unmerging "+y+"...\n", noiselevel=-1)
12115                         emergelog(xterm_titles, "=== Unmerging... ("+y+")")
12116                         mysplit = y.split("/")
12117                         #unmerge...
12118                         retval = portage.unmerge(mysplit[0], mysplit[1], settings["ROOT"],
12119                                 mysettings, unmerge_action not in ["clean","prune"],
12120                                 vartree=vartree, ldpath_mtimes=ldpath_mtimes,
12121                                 scheduler=scheduler)
12122
12123                         if retval != os.EX_OK:
12124                                 emergelog(xterm_titles, " !!! unmerge FAILURE: "+y)
12125                                 if raise_on_error:
12126                                         raise UninstallFailure(retval)
12127                                 sys.exit(retval)
12128                         else:
12129                                 if clean_world and hasattr(sets["world"], "cleanPackage"):
12130                                         sets["world"].cleanPackage(vartree.dbapi, y)
12131                                 emergelog(xterm_titles, " >>> unmerge success: "+y)
12132         if clean_world and hasattr(sets["world"], "remove"):
12133                 for s in root_config.setconfig.active:
12134                         sets["world"].remove(SETPREFIX+s)
12135         return 1
12136
12137 def chk_updated_info_files(root, infodirs, prev_mtimes, retval):
12138
12139         if os.path.exists("/usr/bin/install-info"):
12140                 out = portage.output.EOutput()
12141                 regen_infodirs=[]
12142                 for z in infodirs:
12143                         if z=='':
12144                                 continue
12145                         inforoot=normpath(root+z)
12146                         if os.path.isdir(inforoot):
12147                                 infomtime = long(os.stat(inforoot).st_mtime)
12148                                 if inforoot not in prev_mtimes or \
12149                                         prev_mtimes[inforoot] != infomtime:
12150                                                 regen_infodirs.append(inforoot)
12151
12152                 if not regen_infodirs:
12153                         portage.writemsg_stdout("\n")
12154                         out.einfo("GNU info directory index is up-to-date.")
12155                 else:
12156                         portage.writemsg_stdout("\n")
12157                         out.einfo("Regenerating GNU info directory index...")
12158
12159                         dir_extensions = ("", ".gz", ".bz2")
12160                         icount=0
12161                         badcount=0
12162                         errmsg = ""
12163                         for inforoot in regen_infodirs:
12164                                 if inforoot=='':
12165                                         continue
12166
12167                                 if not os.path.isdir(inforoot) or \
12168                                         not os.access(inforoot, os.W_OK):
12169                                         continue
12170
12171                                 file_list = os.listdir(inforoot)
12172                                 file_list.sort()
12173                                 dir_file = os.path.join(inforoot, "dir")
12174                                 moved_old_dir = False
12175                                 processed_count = 0
12176                                 for x in file_list:
12177                                         if x.startswith(".") or \
12178                                                 os.path.isdir(os.path.join(inforoot, x)):
12179                                                 continue
12180                                         if x.startswith("dir"):
12181                                                 skip = False
12182                                                 for ext in dir_extensions:
12183                                                         if x == "dir" + ext or \
12184                                                                 x == "dir" + ext + ".old":
12185                                                                 skip = True
12186                                                                 break
12187                                                 if skip:
12188                                                         continue
12189                                         if processed_count == 0:
12190                                                 for ext in dir_extensions:
12191                                                         try:
12192                                                                 os.rename(dir_file + ext, dir_file + ext + ".old")
12193                                                                 moved_old_dir = True
12194                                                         except EnvironmentError, e:
12195                                                                 if e.errno != errno.ENOENT:
12196                                                                         raise
12197                                                                 del e
12198                                         processed_count += 1
12199                                         myso=commands.getstatusoutput("LANG=C LANGUAGE=C /usr/bin/install-info --dir-file="+inforoot+"/dir "+inforoot+"/"+x)[1]
12200                                         existsstr="already exists, for file `"
12201                                         if myso!="":
12202                                                 if re.search(existsstr,myso):
12203                                                         # Already exists... Don't increment the count for this.
12204                                                         pass
12205                                                 elif myso[:44]=="install-info: warning: no info dir entry in ":
12206                                                         # This info file doesn't contain a DIR-header: install-info produces this
12207                                                         # (harmless) warning (the --quiet switch doesn't seem to work).
12208                                                         # Don't increment the count for this.
12209                                                         pass
12210                                                 else:
12211                                                         badcount=badcount+1
12212                                                         errmsg += myso + "\n"
12213                                         icount=icount+1
12214
12215                                 if moved_old_dir and not os.path.exists(dir_file):
12216                                         # We didn't generate a new dir file, so put the old file
12217                                         # back where it was originally found.
12218                                         for ext in dir_extensions:
12219                                                 try:
12220                                                         os.rename(dir_file + ext + ".old", dir_file + ext)
12221                                                 except EnvironmentError, e:
12222                                                         if e.errno != errno.ENOENT:
12223                                                                 raise
12224                                                         del e
12225
12226                                 # Clean dir.old cruft so that they don't prevent
12227                                 # unmerge of otherwise empty directories.
12228                                 for ext in dir_extensions:
12229                                         try:
12230                                                 os.unlink(dir_file + ext + ".old")
12231                                         except EnvironmentError, e:
12232                                                 if e.errno != errno.ENOENT:
12233                                                         raise
12234                                                 del e
12235
12236                                 #update mtime so we can potentially avoid regenerating.
12237                                 prev_mtimes[inforoot] = long(os.stat(inforoot).st_mtime)
12238
12239                         if badcount:
12240                                 out.eerror("Processed %d info files; %d errors." % \
12241                                         (icount, badcount))
12242                                 writemsg_level(errmsg, level=logging.ERROR, noiselevel=-1)
12243                         else:
12244                                 if icount > 0:
12245                                         out.einfo("Processed %d info files." % (icount,))
12246
12247
12248 def display_news_notification(root_config, myopts):
12249         target_root = root_config.root
12250         trees = root_config.trees
12251         settings = trees["vartree"].settings
12252         portdb = trees["porttree"].dbapi
12253         vardb = trees["vartree"].dbapi
12254         NEWS_PATH = os.path.join("metadata", "news")
12255         UNREAD_PATH = os.path.join(target_root, NEWS_LIB_PATH, "news")
12256         newsReaderDisplay = False
12257         update = "--pretend" not in myopts
12258
12259         for repo in portdb.getRepositories():
12260                 unreadItems = checkUpdatedNewsItems(
12261                         portdb, vardb, NEWS_PATH, UNREAD_PATH, repo, update=update)
12262                 if unreadItems:
12263                         if not newsReaderDisplay:
12264                                 newsReaderDisplay = True
12265                                 print
12266                         print colorize("WARN", " * IMPORTANT:"),
12267                         print "%s news items need reading for repository '%s'." % (unreadItems, repo)
12268                         
12269         
12270         if newsReaderDisplay:
12271                 print colorize("WARN", " *"),
12272                 print "Use " + colorize("GOOD", "eselect news") + " to read news items."
12273                 print
12274
12275 def display_preserved_libs(vardbapi):
12276         MAX_DISPLAY = 3
12277
12278         # Ensure the registry is consistent with existing files.
12279         vardbapi.plib_registry.pruneNonExisting()
12280
12281         if vardbapi.plib_registry.hasEntries():
12282                 print
12283                 print colorize("WARN", "!!!") + " existing preserved libs:"
12284                 plibdata = vardbapi.plib_registry.getPreservedLibs()
12285                 linkmap = vardbapi.linkmap
12286                 consumer_map = {}
12287                 owners = {}
12288                 linkmap_broken = False
12289
12290                 try:
12291                         linkmap.rebuild()
12292                 except portage.exception.CommandNotFound, e:
12293                         writemsg_level("!!! Command Not Found: %s\n" % (e,),
12294                                 level=logging.ERROR, noiselevel=-1)
12295                         del e
12296                         linkmap_broken = True
12297                 else:
12298                         search_for_owners = set()
12299                         for cpv in plibdata:
12300                                 internal_plib_keys = set(linkmap._obj_key(f) \
12301                                         for f in plibdata[cpv])
12302                                 for f in plibdata[cpv]:
12303                                         if f in consumer_map:
12304                                                 continue
12305                                         consumers = []
12306                                         for c in linkmap.findConsumers(f):
12307                                                 # Filter out any consumers that are also preserved libs
12308                                                 # belonging to the same package as the provider.
12309                                                 if linkmap._obj_key(c) not in internal_plib_keys:
12310                                                         consumers.append(c)
12311                                         consumers.sort()
12312                                         consumer_map[f] = consumers
12313                                         search_for_owners.update(consumers[:MAX_DISPLAY+1])
12314
12315                         owners = vardbapi._owners.getFileOwnerMap(search_for_owners)
12316
12317                 for cpv in plibdata:
12318                         print colorize("WARN", ">>>") + " package: %s" % cpv
12319                         samefile_map = {}
12320                         for f in plibdata[cpv]:
12321                                 obj_key = linkmap._obj_key(f)
12322                                 alt_paths = samefile_map.get(obj_key)
12323                                 if alt_paths is None:
12324                                         alt_paths = set()
12325                                         samefile_map[obj_key] = alt_paths
12326                                 alt_paths.add(f)
12327
12328                         for alt_paths in samefile_map.itervalues():
12329                                 alt_paths = sorted(alt_paths)
12330                                 for p in alt_paths:
12331                                         print colorize("WARN", " * ") + " - %s" % (p,)
12332                                 f = alt_paths[0]
12333                                 consumers = consumer_map.get(f, [])
12334                                 for c in consumers[:MAX_DISPLAY]:
12335                                         print colorize("WARN", " * ") + "     used by %s (%s)" % \
12336                                                 (c, ", ".join(x.mycpv for x in owners.get(c, [])))
12337                                 if len(consumers) == MAX_DISPLAY + 1:
12338                                         print colorize("WARN", " * ") + "     used by %s (%s)" % \
12339                                                 (consumers[MAX_DISPLAY], ", ".join(x.mycpv \
12340                                                 for x in owners.get(consumers[MAX_DISPLAY], [])))
12341                                 elif len(consumers) > MAX_DISPLAY:
12342                                         print colorize("WARN", " * ") + "     used by %d other files" % (len(consumers) - MAX_DISPLAY)
12343                 print "Use " + colorize("GOOD", "emerge @preserved-rebuild") + " to rebuild packages using these libraries"
12344
12345
12346 def _flush_elog_mod_echo():
12347         """
12348         Dump the mod_echo output now so that our other
12349         notifications are shown last.
12350         @rtype: bool
12351         @returns: True if messages were shown, False otherwise.
12352         """
12353         messages_shown = False
12354         try:
12355                 from portage.elog import mod_echo
12356         except ImportError:
12357                 pass # happens during downgrade to a version without the module
12358         else:
12359                 messages_shown = bool(mod_echo._items)
12360                 mod_echo.finalize()
12361         return messages_shown
12362
12363 def post_emerge(root_config, myopts, mtimedb, retval):
12364         """
12365         Misc. things to run at the end of a merge session.
12366         
12367         Update Info Files
12368         Update Config Files
12369         Update News Items
12370         Commit mtimeDB
12371         Display preserved libs warnings
12372         Exit Emerge
12373
12374         @param trees: A dictionary mapping each ROOT to it's package databases
12375         @type trees: dict
12376         @param mtimedb: The mtimeDB to store data needed across merge invocations
12377         @type mtimedb: MtimeDB class instance
12378         @param retval: Emerge's return value
12379         @type retval: Int
12380         @rype: None
12381         @returns:
12382         1.  Calls sys.exit(retval)
12383         """
12384
12385         target_root = root_config.root
12386         trees = { target_root : root_config.trees }
12387         vardbapi = trees[target_root]["vartree"].dbapi
12388         settings = vardbapi.settings
12389         info_mtimes = mtimedb["info"]
12390
12391         # Load the most current variables from ${ROOT}/etc/profile.env
12392         settings.unlock()
12393         settings.reload()
12394         settings.regenerate()
12395         settings.lock()
12396
12397         config_protect = settings.get("CONFIG_PROTECT","").split()
12398         infodirs = settings.get("INFOPATH","").split(":") + \
12399                 settings.get("INFODIR","").split(":")
12400
12401         os.chdir("/")
12402
12403         if retval == os.EX_OK:
12404                 exit_msg = " *** exiting successfully."
12405         else:
12406                 exit_msg = " *** exiting unsuccessfully with status '%s'." % retval
12407         emergelog("notitles" not in settings.features, exit_msg)
12408
12409         _flush_elog_mod_echo()
12410
12411         counter_hash = settings.get("PORTAGE_COUNTER_HASH")
12412         if "--pretend" in myopts or (counter_hash is not None and \
12413                 counter_hash == vardbapi._counter_hash()):
12414                 display_news_notification(root_config, myopts)
12415                 # If vdb state has not changed then there's nothing else to do.
12416                 sys.exit(retval)
12417
12418         vdb_path = os.path.join(target_root, portage.VDB_PATH)
12419         portage.util.ensure_dirs(vdb_path)
12420         vdb_lock = None
12421         if os.access(vdb_path, os.W_OK) and not "--pretend" in myopts:
12422                 vdb_lock = portage.locks.lockdir(vdb_path)
12423
12424         if vdb_lock:
12425                 try:
12426                         if "noinfo" not in settings.features:
12427                                 chk_updated_info_files(target_root,
12428                                         infodirs, info_mtimes, retval)
12429                         mtimedb.commit()
12430                 finally:
12431                         if vdb_lock:
12432                                 portage.locks.unlockdir(vdb_lock)
12433
12434         chk_updated_cfg_files(target_root, config_protect)
12435         
12436         display_news_notification(root_config, myopts)
12437         if retval in (None, os.EX_OK) or (not "--pretend" in myopts):
12438                 display_preserved_libs(vardbapi)        
12439
12440         sys.exit(retval)
12441
12442
12443 def chk_updated_cfg_files(target_root, config_protect):
12444         if config_protect:
12445                 #number of directories with some protect files in them
12446                 procount=0
12447                 for x in config_protect:
12448                         x = os.path.join(target_root, x.lstrip(os.path.sep))
12449                         if not os.access(x, os.W_OK):
12450                                 # Avoid Permission denied errors generated
12451                                 # later by `find`.
12452                                 continue
12453                         try:
12454                                 mymode = os.lstat(x).st_mode
12455                         except OSError:
12456                                 continue
12457                         if stat.S_ISLNK(mymode):
12458                                 # We want to treat it like a directory if it
12459                                 # is a symlink to an existing directory.
12460                                 try:
12461                                         real_mode = os.stat(x).st_mode
12462                                         if stat.S_ISDIR(real_mode):
12463                                                 mymode = real_mode
12464                                 except OSError:
12465                                         pass
12466                         if stat.S_ISDIR(mymode):
12467                                 mycommand = "find '%s' -name '.*' -type d -prune -o -name '._cfg????_*'" % x
12468                         else:
12469                                 mycommand = "find '%s' -maxdepth 1 -name '._cfg????_%s'" % \
12470                                         os.path.split(x.rstrip(os.path.sep))
12471                         mycommand += " ! -name '.*~' ! -iname '.*.bak' -print0"
12472                         a = commands.getstatusoutput(mycommand)
12473                         if a[0] != 0:
12474                                 sys.stderr.write(" %s error scanning '%s': " % (bad("*"), x))
12475                                 sys.stderr.flush()
12476                                 # Show the error message alone, sending stdout to /dev/null.
12477                                 os.system(mycommand + " 1>/dev/null")
12478                         else:
12479                                 files = a[1].split('\0')
12480                                 # split always produces an empty string as the last element
12481                                 if files and not files[-1]:
12482                                         del files[-1]
12483                                 if files:
12484                                         procount += 1
12485                                         print "\n"+colorize("WARN", " * IMPORTANT:"),
12486                                         if stat.S_ISDIR(mymode):
12487                                                  print "%d config files in '%s' need updating." % \
12488                                                         (len(files), x)
12489                                         else:
12490                                                  print "config file '%s' needs updating." % x
12491
12492                 if procount:
12493                         print " "+yellow("*")+" See the "+colorize("INFORM","CONFIGURATION FILES")+ \
12494                                 " section of the " + bold("emerge")
12495                         print " "+yellow("*")+" man page to learn how to update config files."
12496
12497 def checkUpdatedNewsItems(portdb, vardb, NEWS_PATH, UNREAD_PATH, repo_id,
12498         update=False):
12499         """
12500         Examines news items in repodir + '/' + NEWS_PATH and attempts to find unread items
12501         Returns the number of unread (yet relevent) items.
12502         
12503         @param portdb: a portage tree database
12504         @type portdb: pordbapi
12505         @param vardb: an installed package database
12506         @type vardb: vardbapi
12507         @param NEWS_PATH:
12508         @type NEWS_PATH:
12509         @param UNREAD_PATH:
12510         @type UNREAD_PATH:
12511         @param repo_id:
12512         @type repo_id:
12513         @rtype: Integer
12514         @returns:
12515         1.  The number of unread but relevant news items.
12516         
12517         """
12518         from portage.news import NewsManager
12519         manager = NewsManager(portdb, vardb, NEWS_PATH, UNREAD_PATH)
12520         return manager.getUnreadItems( repo_id, update=update )
12521
12522 def insert_category_into_atom(atom, category):
12523         alphanum = re.search(r'\w', atom)
12524         if alphanum:
12525                 ret = atom[:alphanum.start()] + "%s/" % category + \
12526                         atom[alphanum.start():]
12527         else:
12528                 ret = None
12529         return ret
12530
12531 def is_valid_package_atom(x):
12532         if "/" not in x:
12533                 alphanum = re.search(r'\w', x)
12534                 if alphanum:
12535                         x = x[:alphanum.start()] + "cat/" + x[alphanum.start():]
12536         return portage.isvalidatom(x)
12537
12538 def show_blocker_docs_link():
12539         print
12540         print "For more information about " + bad("Blocked Packages") + ", please refer to the following"
12541         print "section of the Gentoo Linux x86 Handbook (architecture is irrelevant):"
12542         print
12543         print "http://www.gentoo.org/doc/en/handbook/handbook-x86.xml?full=1#blocked"
12544         print
12545
12546 def show_mask_docs():
12547         print "For more information, see the MASKED PACKAGES section in the emerge"
12548         print "man page or refer to the Gentoo Handbook."
12549
12550 def action_sync(settings, trees, mtimedb, myopts, myaction):
12551         xterm_titles = "notitles" not in settings.features
12552         emergelog(xterm_titles, " === sync")
12553         myportdir = settings.get("PORTDIR", None)
12554         out = portage.output.EOutput()
12555         if not myportdir:
12556                 sys.stderr.write("!!! PORTDIR is undefined.  Is /etc/make.globals missing?\n")
12557                 sys.exit(1)
12558         if myportdir[-1]=="/":
12559                 myportdir=myportdir[:-1]
12560         try:
12561                 st = os.stat(myportdir)
12562         except OSError:
12563                 st = None
12564         if st is None:
12565                 print ">>>",myportdir,"not found, creating it."
12566                 os.makedirs(myportdir,0755)
12567                 st = os.stat(myportdir)
12568
12569         spawn_kwargs = {}
12570         spawn_kwargs["env"] = settings.environ()
12571         if 'usersync' in settings.features and \
12572                 portage.data.secpass >= 2 and \
12573                 (st.st_uid != os.getuid() and st.st_mode & 0700 or \
12574                 st.st_gid != os.getgid() and st.st_mode & 0070):
12575                 try:
12576                         homedir = pwd.getpwuid(st.st_uid).pw_dir
12577                 except KeyError:
12578                         pass
12579                 else:
12580                         # Drop privileges when syncing, in order to match
12581                         # existing uid/gid settings.
12582                         spawn_kwargs["uid"]    = st.st_uid
12583                         spawn_kwargs["gid"]    = st.st_gid
12584                         spawn_kwargs["groups"] = [st.st_gid]
12585                         spawn_kwargs["env"]["HOME"] = homedir
12586                         umask = 0002
12587                         if not st.st_mode & 0020:
12588                                 umask = umask | 0020
12589                         spawn_kwargs["umask"] = umask
12590
12591         syncuri = settings.get("SYNC", "").strip()
12592         if not syncuri:
12593                 writemsg_level("!!! SYNC is undefined. Is /etc/make.globals missing?\n",
12594                         noiselevel=-1, level=logging.ERROR)
12595                 return 1
12596
12597         vcs_dirs = frozenset([".git", ".svn", "CVS", ".hg"])
12598         vcs_dirs = vcs_dirs.intersection(os.listdir(myportdir))
12599
12600         os.umask(0022)
12601         dosyncuri = syncuri
12602         updatecache_flg = False
12603         if myaction == "metadata":
12604                 print "skipping sync"
12605                 updatecache_flg = True
12606         elif ".git" in vcs_dirs:
12607                 # Update existing git repository, and ignore the syncuri. We are
12608                 # going to trust the user and assume that the user is in the branch
12609                 # that he/she wants updated. We'll let the user manage branches with
12610                 # git directly.
12611                 if portage.process.find_binary("git") is None:
12612                         msg = ["Command not found: git",
12613                         "Type \"emerge dev-util/git\" to enable git support."]
12614                         for l in msg:
12615                                 writemsg_level("!!! %s\n" % l,
12616                                         level=logging.ERROR, noiselevel=-1)
12617                         return 1
12618                 msg = ">>> Starting git pull in %s..." % myportdir
12619                 emergelog(xterm_titles, msg )
12620                 writemsg_level(msg + "\n")
12621                 exitcode = portage.process.spawn_bash("cd %s ; git pull" % \
12622                         (portage._shell_quote(myportdir),), **spawn_kwargs)
12623                 if exitcode != os.EX_OK:
12624                         msg = "!!! git pull error in %s." % myportdir
12625                         emergelog(xterm_titles, msg)
12626                         writemsg_level(msg + "\n", level=logging.ERROR, noiselevel=-1)
12627                         return exitcode
12628                 msg = ">>> Git pull in %s successful" % myportdir
12629                 emergelog(xterm_titles, msg)
12630                 writemsg_level(msg + "\n")
12631                 exitcode = git_sync_timestamps(settings, myportdir)
12632                 if exitcode == os.EX_OK:
12633                         updatecache_flg = True
12634         elif syncuri[:8]=="rsync://":
12635                 for vcs_dir in vcs_dirs:
12636                         writemsg_level(("!!! %s appears to be under revision " + \
12637                                 "control (contains %s).\n!!! Aborting rsync sync.\n") % \
12638                                 (myportdir, vcs_dir), level=logging.ERROR, noiselevel=-1)
12639                         return 1
12640                 if not os.path.exists("/usr/bin/rsync"):
12641                         print "!!! /usr/bin/rsync does not exist, so rsync support is disabled."
12642                         print "!!! Type \"emerge net-misc/rsync\" to enable rsync support."
12643                         sys.exit(1)
12644                 mytimeout=180
12645
12646                 rsync_opts = []
12647                 if settings["PORTAGE_RSYNC_OPTS"] == "":
12648                         portage.writemsg("PORTAGE_RSYNC_OPTS empty or unset, using hardcoded defaults\n")
12649                         rsync_opts.extend([
12650                                 "--recursive",    # Recurse directories
12651                                 "--links",        # Consider symlinks
12652                                 "--safe-links",   # Ignore links outside of tree
12653                                 "--perms",        # Preserve permissions
12654                                 "--times",        # Preserive mod times
12655                                 "--compress",     # Compress the data transmitted
12656                                 "--force",        # Force deletion on non-empty dirs
12657                                 "--whole-file",   # Don't do block transfers, only entire files
12658                                 "--delete",       # Delete files that aren't in the master tree
12659                                 "--stats",        # Show final statistics about what was transfered
12660                                 "--timeout="+str(mytimeout), # IO timeout if not done in X seconds
12661                                 "--exclude=/distfiles",   # Exclude distfiles from consideration
12662                                 "--exclude=/local",       # Exclude local     from consideration
12663                                 "--exclude=/packages",    # Exclude packages  from consideration
12664                         ])
12665
12666                 else:
12667                         # The below validation is not needed when using the above hardcoded
12668                         # defaults.
12669
12670                         portage.writemsg("Using PORTAGE_RSYNC_OPTS instead of hardcoded defaults\n", 1)
12671                         rsync_opts.extend(
12672                                 shlex.split(settings.get("PORTAGE_RSYNC_OPTS","")))
12673                         for opt in ("--recursive", "--times"):
12674                                 if opt not in rsync_opts:
12675                                         portage.writemsg(yellow("WARNING:") + " adding required option " + \
12676                                         "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
12677                                         rsync_opts.append(opt)
12678         
12679                         for exclude in ("distfiles", "local", "packages"):
12680                                 opt = "--exclude=/%s" % exclude
12681                                 if opt not in rsync_opts:
12682                                         portage.writemsg(yellow("WARNING:") + \
12683                                         " adding required option %s not included in "  % opt + \
12684                                         "PORTAGE_RSYNC_OPTS (can be overridden with --exclude='!')\n")
12685                                         rsync_opts.append(opt)
12686         
12687                         if syncuri.rstrip("/").endswith(".gentoo.org/gentoo-portage"):
12688                                 def rsync_opt_startswith(opt_prefix):
12689                                         for x in rsync_opts:
12690                                                 if x.startswith(opt_prefix):
12691                                                         return True
12692                                         return False
12693
12694                                 if not rsync_opt_startswith("--timeout="):
12695                                         rsync_opts.append("--timeout=%d" % mytimeout)
12696
12697                                 for opt in ("--compress", "--whole-file"):
12698                                         if opt not in rsync_opts:
12699                                                 portage.writemsg(yellow("WARNING:") + " adding required option " + \
12700                                                 "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
12701                                                 rsync_opts.append(opt)
12702
12703                 if "--quiet" in myopts:
12704                         rsync_opts.append("--quiet")    # Shut up a lot
12705                 else:
12706                         rsync_opts.append("--verbose")  # Print filelist
12707
12708                 if "--verbose" in myopts:
12709                         rsync_opts.append("--progress")  # Progress meter for each file
12710
12711                 if "--debug" in myopts:
12712                         rsync_opts.append("--checksum") # Force checksum on all files
12713
12714                 # Real local timestamp file.
12715                 servertimestampfile = os.path.join(
12716                         myportdir, "metadata", "timestamp.chk")
12717
12718                 content = portage.util.grabfile(servertimestampfile)
12719                 mytimestamp = 0
12720                 if content:
12721                         try:
12722                                 mytimestamp = time.mktime(time.strptime(content[0],
12723                                         "%a, %d %b %Y %H:%M:%S +0000"))
12724                         except (OverflowError, ValueError):
12725                                 pass
12726                 del content
12727
12728                 try:
12729                         rsync_initial_timeout = \
12730                                 int(settings.get("PORTAGE_RSYNC_INITIAL_TIMEOUT", "15"))
12731                 except ValueError:
12732                         rsync_initial_timeout = 15
12733
12734                 try:
12735                         maxretries=int(settings["PORTAGE_RSYNC_RETRIES"])
12736                 except SystemExit, e:
12737                         raise # Needed else can't exit
12738                 except:
12739                         maxretries=3 #default number of retries
12740
12741                 retries=0
12742                 user_name, hostname, port = re.split(
12743                         "rsync://([^:/]+@)?([^:/]*)(:[0-9]+)?", syncuri, maxsplit=3)[1:4]
12744                 if port is None:
12745                         port=""
12746                 if user_name is None:
12747                         user_name=""
12748                 updatecache_flg=True
12749                 all_rsync_opts = set(rsync_opts)
12750                 extra_rsync_opts = shlex.split(
12751                         settings.get("PORTAGE_RSYNC_EXTRA_OPTS",""))
12752                 all_rsync_opts.update(extra_rsync_opts)
12753                 family = socket.AF_INET
12754                 if "-4" in all_rsync_opts or "--ipv4" in all_rsync_opts:
12755                         family = socket.AF_INET
12756                 elif socket.has_ipv6 and \
12757                         ("-6" in all_rsync_opts or "--ipv6" in all_rsync_opts):
12758                         family = socket.AF_INET6
12759                 ips=[]
12760                 SERVER_OUT_OF_DATE = -1
12761                 EXCEEDED_MAX_RETRIES = -2
12762                 while (1):
12763                         if ips:
12764                                 del ips[0]
12765                         if ips==[]:
12766                                 try:
12767                                         for addrinfo in socket.getaddrinfo(
12768                                                 hostname, None, family, socket.SOCK_STREAM):
12769                                                 if socket.has_ipv6 and addrinfo[0] == socket.AF_INET6:
12770                                                         # IPv6 addresses need to be enclosed in square brackets
12771                                                         ips.append("[%s]" % addrinfo[4][0])
12772                                                 else:
12773                                                         ips.append(addrinfo[4][0])
12774                                         from random import shuffle
12775                                         shuffle(ips)
12776                                 except SystemExit, e:
12777                                         raise # Needed else can't exit
12778                                 except Exception, e:
12779                                         print "Notice:",str(e)
12780                                         dosyncuri=syncuri
12781
12782                         if ips:
12783                                 try:
12784                                         dosyncuri = syncuri.replace(
12785                                                 "//" + user_name + hostname + port + "/",
12786                                                 "//" + user_name + ips[0] + port + "/", 1)
12787                                 except SystemExit, e:
12788                                         raise # Needed else can't exit
12789                                 except Exception, e:
12790                                         print "Notice:",str(e)
12791                                         dosyncuri=syncuri
12792
12793                         if (retries==0):
12794                                 if "--ask" in myopts:
12795                                         if userquery("Do you want to sync your Portage tree with the mirror at\n" + blue(dosyncuri) + bold("?"))=="No":
12796                                                 print
12797                                                 print "Quitting."
12798                                                 print
12799                                                 sys.exit(0)
12800                                 emergelog(xterm_titles, ">>> Starting rsync with " + dosyncuri)
12801                                 if "--quiet" not in myopts:
12802                                         print ">>> Starting rsync with "+dosyncuri+"..."
12803                         else:
12804                                 emergelog(xterm_titles,
12805                                         ">>> Starting retry %d of %d with %s" % \
12806                                                 (retries,maxretries,dosyncuri))
12807                                 print "\n\n>>> Starting retry %d of %d with %s" % (retries,maxretries,dosyncuri)
12808
12809                         if mytimestamp != 0 and "--quiet" not in myopts:
12810                                 print ">>> Checking server timestamp ..."
12811
12812                         rsynccommand = ["/usr/bin/rsync"] + rsync_opts + extra_rsync_opts
12813
12814                         if "--debug" in myopts:
12815                                 print rsynccommand
12816
12817                         exitcode = os.EX_OK
12818                         servertimestamp = 0
12819                         # Even if there's no timestamp available locally, fetch the
12820                         # timestamp anyway as an initial probe to verify that the server is
12821                         # responsive.  This protects us from hanging indefinitely on a
12822                         # connection attempt to an unresponsive server which rsync's
12823                         # --timeout option does not prevent.
12824                         if True:
12825                                 # Temporary file for remote server timestamp comparison.
12826                                 from tempfile import mkstemp
12827                                 fd, tmpservertimestampfile = mkstemp()
12828                                 os.close(fd)
12829                                 mycommand = rsynccommand[:]
12830                                 mycommand.append(dosyncuri.rstrip("/") + \
12831                                         "/metadata/timestamp.chk")
12832                                 mycommand.append(tmpservertimestampfile)
12833                                 content = None
12834                                 mypids = []
12835                                 try:
12836                                         def timeout_handler(signum, frame):
12837                                                 raise portage.exception.PortageException("timed out")
12838                                         signal.signal(signal.SIGALRM, timeout_handler)
12839                                         # Timeout here in case the server is unresponsive.  The
12840                                         # --timeout rsync option doesn't apply to the initial
12841                                         # connection attempt.
12842                                         if rsync_initial_timeout:
12843                                                 signal.alarm(rsync_initial_timeout)
12844                                         try:
12845                                                 mypids.extend(portage.process.spawn(
12846                                                         mycommand, env=settings.environ(), returnpid=True))
12847                                                 exitcode = os.waitpid(mypids[0], 0)[1]
12848                                                 content = portage.grabfile(tmpservertimestampfile)
12849                                         finally:
12850                                                 if rsync_initial_timeout:
12851                                                         signal.alarm(0)
12852                                                 try:
12853                                                         os.unlink(tmpservertimestampfile)
12854                                                 except OSError:
12855                                                         pass
12856                                 except portage.exception.PortageException, e:
12857                                         # timed out
12858                                         print e
12859                                         del e
12860                                         if mypids and os.waitpid(mypids[0], os.WNOHANG) == (0,0):
12861                                                 os.kill(mypids[0], signal.SIGTERM)
12862                                                 os.waitpid(mypids[0], 0)
12863                                         # This is the same code rsync uses for timeout.
12864                                         exitcode = 30
12865                                 else:
12866                                         if exitcode != os.EX_OK:
12867                                                 if exitcode & 0xff:
12868                                                         exitcode = (exitcode & 0xff) << 8
12869                                                 else:
12870                                                         exitcode = exitcode >> 8
12871                                 if mypids:
12872                                         portage.process.spawned_pids.remove(mypids[0])
12873                                 if content:
12874                                         try:
12875                                                 servertimestamp = time.mktime(time.strptime(
12876                                                         content[0], "%a, %d %b %Y %H:%M:%S +0000"))
12877                                         except (OverflowError, ValueError):
12878                                                 pass
12879                                 del mycommand, mypids, content
12880                         if exitcode == os.EX_OK:
12881                                 if (servertimestamp != 0) and (servertimestamp == mytimestamp):
12882                                         emergelog(xterm_titles,
12883                                                 ">>> Cancelling sync -- Already current.")
12884                                         print
12885                                         print ">>>"
12886                                         print ">>> Timestamps on the server and in the local repository are the same."
12887                                         print ">>> Cancelling all further sync action. You are already up to date."
12888                                         print ">>>"
12889                                         print ">>> In order to force sync, remove '%s'." % servertimestampfile
12890                                         print ">>>"
12891                                         print
12892                                         sys.exit(0)
12893                                 elif (servertimestamp != 0) and (servertimestamp < mytimestamp):
12894                                         emergelog(xterm_titles,
12895                                                 ">>> Server out of date: %s" % dosyncuri)
12896                                         print
12897                                         print ">>>"
12898                                         print ">>> SERVER OUT OF DATE: %s" % dosyncuri
12899                                         print ">>>"
12900                                         print ">>> In order to force sync, remove '%s'." % servertimestampfile
12901                                         print ">>>"
12902                                         print
12903                                         exitcode = SERVER_OUT_OF_DATE
12904                                 elif (servertimestamp == 0) or (servertimestamp > mytimestamp):
12905                                         # actual sync
12906                                         mycommand = rsynccommand + [dosyncuri+"/", myportdir]
12907                                         exitcode = portage.process.spawn(mycommand, **spawn_kwargs)
12908                                         if exitcode in [0,1,3,4,11,14,20,21]:
12909                                                 break
12910                         elif exitcode in [1,3,4,11,14,20,21]:
12911                                 break
12912                         else:
12913                                 # Code 2 indicates protocol incompatibility, which is expected
12914                                 # for servers with protocol < 29 that don't support
12915                                 # --prune-empty-directories.  Retry for a server that supports
12916                                 # at least rsync protocol version 29 (>=rsync-2.6.4).
12917                                 pass
12918
12919                         retries=retries+1
12920
12921                         if retries<=maxretries:
12922                                 print ">>> Retrying..."
12923                                 time.sleep(11)
12924                         else:
12925                                 # over retries
12926                                 # exit loop
12927                                 updatecache_flg=False
12928                                 exitcode = EXCEEDED_MAX_RETRIES
12929                                 break
12930
12931                 if (exitcode==0):
12932                         emergelog(xterm_titles, "=== Sync completed with %s" % dosyncuri)
12933                 elif exitcode == SERVER_OUT_OF_DATE:
12934                         sys.exit(1)
12935                 elif exitcode == EXCEEDED_MAX_RETRIES:
12936                         sys.stderr.write(
12937                                 ">>> Exceeded PORTAGE_RSYNC_RETRIES: %s\n" % maxretries)
12938                         sys.exit(1)
12939                 elif (exitcode>0):
12940                         msg = []
12941                         if exitcode==1:
12942                                 msg.append("Rsync has reported that there is a syntax error. Please ensure")
12943                                 msg.append("that your SYNC statement is proper.")
12944                                 msg.append("SYNC=" + settings["SYNC"])
12945                         elif exitcode==11:
12946                                 msg.append("Rsync has reported that there is a File IO error. Normally")
12947                                 msg.append("this means your disk is full, but can be caused by corruption")
12948                                 msg.append("on the filesystem that contains PORTDIR. Please investigate")
12949                                 msg.append("and try again after the problem has been fixed.")
12950                                 msg.append("PORTDIR=" + settings["PORTDIR"])
12951                         elif exitcode==20:
12952                                 msg.append("Rsync was killed before it finished.")
12953                         else:
12954                                 msg.append("Rsync has not successfully finished. It is recommended that you keep")
12955                                 msg.append("trying or that you use the 'emerge-webrsync' option if you are unable")
12956                                 msg.append("to use rsync due to firewall or other restrictions. This should be a")
12957                                 msg.append("temporary problem unless complications exist with your network")
12958                                 msg.append("(and possibly your system's filesystem) configuration.")
12959                         for line in msg:
12960                                 out.eerror(line)
12961                         sys.exit(exitcode)
12962         elif syncuri[:6]=="cvs://":
12963                 if not os.path.exists("/usr/bin/cvs"):
12964                         print "!!! /usr/bin/cvs does not exist, so CVS support is disabled."
12965                         print "!!! Type \"emerge dev-util/cvs\" to enable CVS support."
12966                         sys.exit(1)
12967                 cvsroot=syncuri[6:]
12968                 cvsdir=os.path.dirname(myportdir)
12969                 if not os.path.exists(myportdir+"/CVS"):
12970                         #initial checkout
12971                         print ">>> Starting initial cvs checkout with "+syncuri+"..."
12972                         if os.path.exists(cvsdir+"/gentoo-x86"):
12973                                 print "!!! existing",cvsdir+"/gentoo-x86 directory; exiting."
12974                                 sys.exit(1)
12975                         try:
12976                                 os.rmdir(myportdir)
12977                         except OSError, e:
12978                                 if e.errno != errno.ENOENT:
12979                                         sys.stderr.write(
12980                                                 "!!! existing '%s' directory; exiting.\n" % myportdir)
12981                                         sys.exit(1)
12982                                 del e
12983                         if portage.spawn("cd "+cvsdir+"; cvs -z0 -d "+cvsroot+" co -P gentoo-x86",settings,free=1):
12984                                 print "!!! cvs checkout error; exiting."
12985                                 sys.exit(1)
12986                         os.rename(os.path.join(cvsdir, "gentoo-x86"), myportdir)
12987                 else:
12988                         #cvs update
12989                         print ">>> Starting cvs update with "+syncuri+"..."
12990                         retval = portage.process.spawn_bash(
12991                                 "cd %s; cvs -z0 -q update -dP" % \
12992                                 (portage._shell_quote(myportdir),), **spawn_kwargs)
12993                         if retval != os.EX_OK:
12994                                 sys.exit(retval)
12995                 dosyncuri = syncuri
12996         else:
12997                 writemsg_level("!!! Unrecognized protocol: SYNC='%s'\n" % (syncuri,),
12998                         noiselevel=-1, level=logging.ERROR)
12999                 return 1
13000
13001         if updatecache_flg and  \
13002                 myaction != "metadata" and \
13003                 "metadata-transfer" not in settings.features:
13004                 updatecache_flg = False
13005
13006         # Reload the whole config from scratch.
13007         settings, trees, mtimedb = load_emerge_config(trees=trees)
13008         root_config = trees[settings["ROOT"]]["root_config"]
13009         portdb = trees[settings["ROOT"]]["porttree"].dbapi
13010
13011         if os.path.exists(myportdir+"/metadata/cache") and updatecache_flg:
13012                 action_metadata(settings, portdb, myopts)
13013
13014         if portage._global_updates(trees, mtimedb["updates"]):
13015                 mtimedb.commit()
13016                 # Reload the whole config from scratch.
13017                 settings, trees, mtimedb = load_emerge_config(trees=trees)
13018                 portdb = trees[settings["ROOT"]]["porttree"].dbapi
13019                 root_config = trees[settings["ROOT"]]["root_config"]
13020
13021         mybestpv = portdb.xmatch("bestmatch-visible",
13022                 portage.const.PORTAGE_PACKAGE_ATOM)
13023         mypvs = portage.best(
13024                 trees[settings["ROOT"]]["vartree"].dbapi.match(
13025                 portage.const.PORTAGE_PACKAGE_ATOM))
13026
13027         chk_updated_cfg_files("/", settings.get("CONFIG_PROTECT","").split())
13028
13029         if myaction != "metadata":
13030                 if os.access(portage.USER_CONFIG_PATH + "/bin/post_sync", os.X_OK):
13031                         retval = portage.process.spawn(
13032                                 [os.path.join(portage.USER_CONFIG_PATH, "bin", "post_sync"),
13033                                 dosyncuri], env=settings.environ())
13034                         if retval != os.EX_OK:
13035                                 print red(" * ")+bold("spawn failed of "+ portage.USER_CONFIG_PATH + "/bin/post_sync")
13036
13037         if(mybestpv != mypvs) and not "--quiet" in myopts:
13038                 print
13039                 print red(" * ")+bold("An update to portage is available.")+" It is _highly_ recommended"
13040                 print red(" * ")+"that you update portage now, before any other packages are updated."
13041                 print
13042                 print red(" * ")+"To update portage, run 'emerge portage' now."
13043                 print
13044         
13045         display_news_notification(root_config, myopts)
13046         return os.EX_OK
13047
13048 def git_sync_timestamps(settings, portdir):
13049         """
13050         Since git doesn't preserve timestamps, synchronize timestamps between
13051         entries and ebuilds/eclasses. Assume the cache has the correct timestamp
13052         for a given file as long as the file in the working tree is not modified
13053         (relative to HEAD).
13054         """
13055         cache_dir = os.path.join(portdir, "metadata", "cache")
13056         if not os.path.isdir(cache_dir):
13057                 return os.EX_OK
13058         writemsg_level(">>> Synchronizing timestamps...\n")
13059
13060         from portage.cache.cache_errors import CacheError
13061         try:
13062                 cache_db = settings.load_best_module("portdbapi.metadbmodule")(
13063                         portdir, "metadata/cache", portage.auxdbkeys[:], readonly=True)
13064         except CacheError, e:
13065                 writemsg_level("!!! Unable to instantiate cache: %s\n" % (e,),
13066                         level=logging.ERROR, noiselevel=-1)
13067                 return 1
13068
13069         ec_dir = os.path.join(portdir, "eclass")
13070         try:
13071                 ec_names = set(f[:-7] for f in os.listdir(ec_dir) \
13072                         if f.endswith(".eclass"))
13073         except OSError, e:
13074                 writemsg_level("!!! Unable to list eclasses: %s\n" % (e,),
13075                         level=logging.ERROR, noiselevel=-1)
13076                 return 1
13077
13078         args = [portage.const.BASH_BINARY, "-c",
13079                 "cd %s && git diff-index --name-only --diff-filter=M HEAD" % \
13080                 portage._shell_quote(portdir)]
13081         import subprocess
13082         proc = subprocess.Popen(args, stdout=subprocess.PIPE)
13083         modified_files = set(l.rstrip("\n") for l in proc.stdout)
13084         rval = proc.wait()
13085         if rval != os.EX_OK:
13086                 return rval
13087
13088         modified_eclasses = set(ec for ec in ec_names \
13089                 if os.path.join("eclass", ec + ".eclass") in modified_files)
13090
13091         updated_ec_mtimes = {}
13092
13093         for cpv in cache_db:
13094                 cpv_split = portage.catpkgsplit(cpv)
13095                 if cpv_split is None:
13096                         writemsg_level("!!! Invalid cache entry: %s\n" % (cpv,),
13097                                 level=logging.ERROR, noiselevel=-1)
13098                         continue
13099
13100                 cat, pn, ver, rev = cpv_split
13101                 cat, pf = portage.catsplit(cpv)
13102                 relative_eb_path = os.path.join(cat, pn, pf + ".ebuild")
13103                 if relative_eb_path in modified_files:
13104                         continue
13105
13106                 try:
13107                         cache_entry = cache_db[cpv]
13108                         eb_mtime = cache_entry.get("_mtime_")
13109                         ec_mtimes = cache_entry.get("_eclasses_")
13110                 except KeyError:
13111                         writemsg_level("!!! Missing cache entry: %s\n" % (cpv,),
13112                                 level=logging.ERROR, noiselevel=-1)
13113                         continue
13114                 except CacheError, e:
13115                         writemsg_level("!!! Unable to access cache entry: %s %s\n" % \
13116                                 (cpv, e), level=logging.ERROR, noiselevel=-1)
13117                         continue
13118
13119                 if eb_mtime is None:
13120                         writemsg_level("!!! Missing ebuild mtime: %s\n" % (cpv,),
13121                                 level=logging.ERROR, noiselevel=-1)
13122                         continue
13123
13124                 try:
13125                         eb_mtime = long(eb_mtime)
13126                 except ValueError:
13127                         writemsg_level("!!! Invalid ebuild mtime: %s %s\n" % \
13128                                 (cpv, eb_mtime), level=logging.ERROR, noiselevel=-1)
13129                         continue
13130
13131                 if ec_mtimes is None:
13132                         writemsg_level("!!! Missing eclass mtimes: %s\n" % (cpv,),
13133                                 level=logging.ERROR, noiselevel=-1)
13134                         continue
13135
13136                 if modified_eclasses.intersection(ec_mtimes):
13137                         continue
13138
13139                 missing_eclasses = set(ec_mtimes).difference(ec_names)
13140                 if missing_eclasses:
13141                         writemsg_level("!!! Non-existent eclass(es): %s %s\n" % \
13142                                 (cpv, sorted(missing_eclasses)), level=logging.ERROR,
13143                                 noiselevel=-1)
13144                         continue
13145
13146                 eb_path = os.path.join(portdir, relative_eb_path)
13147                 try:
13148                         current_eb_mtime = os.stat(eb_path)
13149                 except OSError:
13150                         writemsg_level("!!! Missing ebuild: %s\n" % \
13151                                 (cpv,), level=logging.ERROR, noiselevel=-1)
13152                         continue
13153
13154                 inconsistent = False
13155                 for ec, (ec_path, ec_mtime) in ec_mtimes.iteritems():
13156                         updated_mtime = updated_ec_mtimes.get(ec)
13157                         if updated_mtime is not None and updated_mtime != ec_mtime:
13158                                 writemsg_level("!!! Inconsistent eclass mtime: %s %s\n" % \
13159                                         (cpv, ec), level=logging.ERROR, noiselevel=-1)
13160                                 inconsistent = True
13161                                 break
13162
13163                 if inconsistent:
13164                         continue
13165
13166                 if current_eb_mtime != eb_mtime:
13167                         os.utime(eb_path, (eb_mtime, eb_mtime))
13168
13169                 for ec, (ec_path, ec_mtime) in ec_mtimes.iteritems():
13170                         if ec in updated_ec_mtimes:
13171                                 continue
13172                         ec_path = os.path.join(ec_dir, ec + ".eclass")
13173                         current_mtime = long(os.stat(ec_path).st_mtime)
13174                         if current_mtime != ec_mtime:
13175                                 os.utime(ec_path, (ec_mtime, ec_mtime))
13176                         updated_ec_mtimes[ec] = ec_mtime
13177
13178         return os.EX_OK
13179
13180 def action_metadata(settings, portdb, myopts):
13181         portage.writemsg_stdout("\n>>> Updating Portage cache:      ")
13182         old_umask = os.umask(0002)
13183         cachedir = os.path.normpath(settings.depcachedir)
13184         if cachedir in ["/",    "/bin", "/dev",  "/etc",  "/home",
13185                                         "/lib", "/opt", "/proc", "/root", "/sbin",
13186                                         "/sys", "/tmp", "/usr",  "/var"]:
13187                 print >> sys.stderr, "!!! PORTAGE_DEPCACHEDIR IS SET TO A PRIMARY " + \
13188                         "ROOT DIRECTORY ON YOUR SYSTEM."
13189                 print >> sys.stderr, \
13190                         "!!! This is ALMOST CERTAINLY NOT what you want: '%s'" % cachedir
13191                 sys.exit(73)
13192         if not os.path.exists(cachedir):
13193                 os.mkdir(cachedir)
13194
13195         ec = portage.eclass_cache.cache(portdb.porttree_root)
13196         myportdir = os.path.realpath(settings["PORTDIR"])
13197         cm = settings.load_best_module("portdbapi.metadbmodule")(
13198                 myportdir, "metadata/cache", portage.auxdbkeys[:], readonly=True)
13199
13200         from portage.cache import util
13201
13202         class percentage_noise_maker(util.quiet_mirroring):
13203                 def __init__(self, dbapi):
13204                         self.dbapi = dbapi
13205                         self.cp_all = dbapi.cp_all()
13206                         l = len(self.cp_all)
13207                         self.call_update_min = 100000000
13208                         self.min_cp_all = l/100.0
13209                         self.count = 1
13210                         self.pstr = ''
13211
13212                 def __iter__(self):
13213                         for x in self.cp_all:
13214                                 self.count += 1
13215                                 if self.count > self.min_cp_all:
13216                                         self.call_update_min = 0
13217                                         self.count = 0
13218                                 for y in self.dbapi.cp_list(x):
13219                                         yield y
13220                         self.call_update_mine = 0
13221
13222                 def update(self, *arg):
13223                         try:                            self.pstr = int(self.pstr) + 1
13224                         except ValueError:      self.pstr = 1
13225                         sys.stdout.write("%s%i%%" % \
13226                                 ("\b" * (len(str(self.pstr))+1), self.pstr))
13227                         sys.stdout.flush()
13228                         self.call_update_min = 10000000
13229
13230                 def finish(self, *arg):
13231                         sys.stdout.write("\b\b\b\b100%\n")
13232                         sys.stdout.flush()
13233
13234         if "--quiet" in myopts:
13235                 def quicky_cpv_generator(cp_all_list):
13236                         for x in cp_all_list:
13237                                 for y in portdb.cp_list(x):
13238                                         yield y
13239                 source = quicky_cpv_generator(portdb.cp_all())
13240                 noise_maker = portage.cache.util.quiet_mirroring()
13241         else:
13242                 noise_maker = source = percentage_noise_maker(portdb)
13243         portage.cache.util.mirror_cache(source, cm, portdb.auxdb[myportdir],
13244                 eclass_cache=ec, verbose_instance=noise_maker)
13245
13246         sys.stdout.flush()
13247         os.umask(old_umask)
13248
13249 def action_regen(settings, portdb, max_jobs, max_load):
13250         xterm_titles = "notitles" not in settings.features
13251         emergelog(xterm_titles, " === regen")
13252         #regenerate cache entries
13253         portage.writemsg_stdout("Regenerating cache entries...\n")
13254         try:
13255                 os.close(sys.stdin.fileno())
13256         except SystemExit, e:
13257                 raise # Needed else can't exit
13258         except:
13259                 pass
13260         sys.stdout.flush()
13261
13262         regen = MetadataRegen(portdb, max_jobs=max_jobs, max_load=max_load)
13263         regen.run()
13264
13265         portage.writemsg_stdout("done!\n")
13266         return regen.returncode
13267
13268 def action_config(settings, trees, myopts, myfiles):
13269         if len(myfiles) != 1:
13270                 print red("!!! config can only take a single package atom at this time\n")
13271                 sys.exit(1)
13272         if not is_valid_package_atom(myfiles[0]):
13273                 portage.writemsg("!!! '%s' is not a valid package atom.\n" % myfiles[0],
13274                         noiselevel=-1)
13275                 portage.writemsg("!!! Please check ebuild(5) for full details.\n")
13276                 portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
13277                 sys.exit(1)
13278         print
13279         try:
13280                 pkgs = trees[settings["ROOT"]]["vartree"].dbapi.match(myfiles[0])
13281         except portage.exception.AmbiguousPackageName, e:
13282                 # Multiple matches thrown from cpv_expand
13283                 pkgs = e.args[0]
13284         if len(pkgs) == 0:
13285                 print "No packages found.\n"
13286                 sys.exit(0)
13287         elif len(pkgs) > 1:
13288                 if "--ask" in myopts:
13289                         options = []
13290                         print "Please select a package to configure:"
13291                         idx = 0
13292                         for pkg in pkgs:
13293                                 idx += 1
13294                                 options.append(str(idx))
13295                                 print options[-1]+") "+pkg
13296                         print "X) Cancel"
13297                         options.append("X")
13298                         idx = userquery("Selection?", options)
13299                         if idx == "X":
13300                                 sys.exit(0)
13301                         pkg = pkgs[int(idx)-1]
13302                 else:
13303                         print "The following packages available:"
13304                         for pkg in pkgs:
13305                                 print "* "+pkg
13306                         print "\nPlease use a specific atom or the --ask option."
13307                         sys.exit(1)
13308         else:
13309                 pkg = pkgs[0]
13310
13311         print
13312         if "--ask" in myopts:
13313                 if userquery("Ready to configure "+pkg+"?") == "No":
13314                         sys.exit(0)
13315         else:
13316                 print "Configuring pkg..."
13317         print
13318         ebuildpath = trees[settings["ROOT"]]["vartree"].dbapi.findname(pkg)
13319         mysettings = portage.config(clone=settings)
13320         vardb = trees[mysettings["ROOT"]]["vartree"].dbapi
13321         debug = mysettings.get("PORTAGE_DEBUG") == "1"
13322         retval = portage.doebuild(ebuildpath, "config", mysettings["ROOT"],
13323                 mysettings,
13324                 debug=(settings.get("PORTAGE_DEBUG", "") == 1), cleanup=True,
13325                 mydbapi=trees[settings["ROOT"]]["vartree"].dbapi, tree="vartree")
13326         if retval == os.EX_OK:
13327                 portage.doebuild(ebuildpath, "clean", mysettings["ROOT"],
13328                         mysettings, debug=debug, mydbapi=vardb, tree="vartree")
13329         print
13330
13331 def action_info(settings, trees, myopts, myfiles):
13332         print getportageversion(settings["PORTDIR"], settings["ROOT"],
13333                 settings.profile_path, settings["CHOST"],
13334                 trees[settings["ROOT"]]["vartree"].dbapi)
13335         header_width = 65
13336         header_title = "System Settings"
13337         if myfiles:
13338                 print header_width * "="
13339                 print header_title.rjust(int(header_width/2 + len(header_title)/2))
13340         print header_width * "="
13341         print "System uname: "+platform.platform(aliased=1)
13342
13343         lastSync = portage.grabfile(os.path.join(
13344                 settings["PORTDIR"], "metadata", "timestamp.chk"))
13345         print "Timestamp of tree:",
13346         if lastSync:
13347                 print lastSync[0]
13348         else:
13349                 print "Unknown"
13350
13351         output=commands.getstatusoutput("distcc --version")
13352         if not output[0]:
13353                 print str(output[1].split("\n",1)[0]),
13354                 if "distcc" in settings.features:
13355                         print "[enabled]"
13356                 else:
13357                         print "[disabled]"
13358
13359         output=commands.getstatusoutput("ccache -V")
13360         if not output[0]:
13361                 print str(output[1].split("\n",1)[0]),
13362                 if "ccache" in settings.features:
13363                         print "[enabled]"
13364                 else:
13365                         print "[disabled]"
13366
13367         myvars  = ["sys-devel/autoconf", "sys-devel/automake", "virtual/os-headers",
13368                    "sys-devel/binutils", "sys-devel/libtool",  "dev-lang/python"]
13369         myvars += portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_pkgs")
13370         myvars  = portage.util.unique_array(myvars)
13371         myvars.sort()
13372
13373         for x in myvars:
13374                 if portage.isvalidatom(x):
13375                         pkg_matches = trees["/"]["vartree"].dbapi.match(x)
13376                         pkg_matches = [portage.catpkgsplit(cpv)[1:] for cpv in pkg_matches]
13377                         pkg_matches.sort(key=cmp_sort_key(portage.pkgcmp))
13378                         pkgs = []
13379                         for pn, ver, rev in pkg_matches:
13380                                 if rev != "r0":
13381                                         pkgs.append(ver + "-" + rev)
13382                                 else:
13383                                         pkgs.append(ver)
13384                         if pkgs:
13385                                 pkgs = ", ".join(pkgs)
13386                                 print "%-20s %s" % (x+":", pkgs)
13387                 else:
13388                         print "%-20s %s" % (x+":", "[NOT VALID]")
13389
13390         libtool_vers = ",".join(trees["/"]["vartree"].dbapi.match("sys-devel/libtool"))
13391
13392         if "--verbose" in myopts:
13393                 myvars=settings.keys()
13394         else:
13395                 myvars = ['GENTOO_MIRRORS', 'CONFIG_PROTECT', 'CONFIG_PROTECT_MASK',
13396                           'PORTDIR', 'DISTDIR', 'PKGDIR', 'PORTAGE_TMPDIR',
13397                           'PORTDIR_OVERLAY', 'USE', 'CHOST', 'CFLAGS', 'CXXFLAGS',
13398                           'ACCEPT_KEYWORDS', 'SYNC', 'FEATURES', 'EMERGE_DEFAULT_OPTS']
13399
13400                 myvars.extend(portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_vars"))
13401
13402         myvars = portage.util.unique_array(myvars)
13403         unset_vars = []
13404         myvars.sort()
13405         for x in myvars:
13406                 if x in settings:
13407                         if x != "USE":
13408                                 print '%s="%s"' % (x, settings[x])
13409                         else:
13410                                 use = set(settings["USE"].split())
13411                                 use_expand = settings["USE_EXPAND"].split()
13412                                 use_expand.sort()
13413                                 for varname in use_expand:
13414                                         flag_prefix = varname.lower() + "_"
13415                                         for f in list(use):
13416                                                 if f.startswith(flag_prefix):
13417                                                         use.remove(f)
13418                                 use = list(use)
13419                                 use.sort()
13420                                 print 'USE="%s"' % " ".join(use),
13421                                 for varname in use_expand:
13422                                         myval = settings.get(varname)
13423                                         if myval:
13424                                                 print '%s="%s"' % (varname, myval),
13425                                 print
13426                 else:
13427                         unset_vars.append(x)
13428         if unset_vars:
13429                 print "Unset:  "+", ".join(unset_vars)
13430         print
13431
13432         if "--debug" in myopts:
13433                 for x in dir(portage):
13434                         module = getattr(portage, x)
13435                         if "cvs_id_string" in dir(module):
13436                                 print "%s: %s" % (str(x), str(module.cvs_id_string))
13437
13438         # See if we can find any packages installed matching the strings
13439         # passed on the command line
13440         mypkgs = []
13441         vardb = trees[settings["ROOT"]]["vartree"].dbapi
13442         portdb = trees[settings["ROOT"]]["porttree"].dbapi
13443         for x in myfiles:
13444                 mypkgs.extend(vardb.match(x))
13445
13446         # If some packages were found...
13447         if mypkgs:
13448                 # Get our global settings (we only print stuff if it varies from
13449                 # the current config)
13450                 mydesiredvars = [ 'CHOST', 'CFLAGS', 'CXXFLAGS', 'LDFLAGS' ]
13451                 auxkeys = mydesiredvars + [ "USE", "IUSE"]
13452                 global_vals = {}
13453                 pkgsettings = portage.config(clone=settings)
13454
13455                 for myvar in mydesiredvars:
13456                         global_vals[myvar] = set(settings.get(myvar, "").split())
13457
13458                 # Loop through each package
13459                 # Only print settings if they differ from global settings
13460                 header_title = "Package Settings"
13461                 print header_width * "="
13462                 print header_title.rjust(int(header_width/2 + len(header_title)/2))
13463                 print header_width * "="
13464                 from portage.output import EOutput
13465                 out = EOutput()
13466                 for pkg in mypkgs:
13467                         # Get all package specific variables
13468                         auxvalues = vardb.aux_get(pkg, auxkeys)
13469                         valuesmap = {}
13470                         for i in xrange(len(auxkeys)):
13471                                 valuesmap[auxkeys[i]] = set(auxvalues[i].split())
13472                         diff_values = {}
13473                         for myvar in mydesiredvars:
13474                                 # If the package variable doesn't match the
13475                                 # current global variable, something has changed
13476                                 # so set diff_found so we know to print
13477                                 if valuesmap[myvar] != global_vals[myvar]:
13478                                         diff_values[myvar] = valuesmap[myvar]
13479                         valuesmap["IUSE"] = set(filter_iuse_defaults(valuesmap["IUSE"]))
13480                         valuesmap["USE"] = valuesmap["USE"].intersection(valuesmap["IUSE"])
13481                         pkgsettings.reset()
13482                         # If a matching ebuild is no longer available in the tree, maybe it
13483                         # would make sense to compare against the flags for the best
13484                         # available version with the same slot?
13485                         mydb = None
13486                         if portdb.cpv_exists(pkg):
13487                                 mydb = portdb
13488                         pkgsettings.setcpv(pkg, mydb=mydb)
13489                         if valuesmap["IUSE"].intersection(
13490                                 pkgsettings["PORTAGE_USE"].split()) != valuesmap["USE"]:
13491                                 diff_values["USE"] = valuesmap["USE"]
13492                         # If a difference was found, print the info for
13493                         # this package.
13494                         if diff_values:
13495                                 # Print package info
13496                                 print "%s was built with the following:" % pkg
13497                                 for myvar in mydesiredvars + ["USE"]:
13498                                         if myvar in diff_values:
13499                                                 mylist = list(diff_values[myvar])
13500                                                 mylist.sort()
13501                                                 print "%s=\"%s\"" % (myvar, " ".join(mylist))
13502                                 print
13503                         print ">>> Attempting to run pkg_info() for '%s'" % pkg
13504                         ebuildpath = vardb.findname(pkg)
13505                         if not ebuildpath or not os.path.exists(ebuildpath):
13506                                 out.ewarn("No ebuild found for '%s'" % pkg)
13507                                 continue
13508                         portage.doebuild(ebuildpath, "info", pkgsettings["ROOT"],
13509                                 pkgsettings, debug=(settings.get("PORTAGE_DEBUG", "") == 1),
13510                                 mydbapi=trees[settings["ROOT"]]["vartree"].dbapi,
13511                                 tree="vartree")
13512
13513 def action_search(root_config, myopts, myfiles, spinner):
13514         if not myfiles:
13515                 print "emerge: no search terms provided."
13516         else:
13517                 searchinstance = search(root_config,
13518                         spinner, "--searchdesc" in myopts,
13519                         "--quiet" not in myopts, "--usepkg" in myopts,
13520                         "--usepkgonly" in myopts)
13521                 for mysearch in myfiles:
13522                         try:
13523                                 searchinstance.execute(mysearch)
13524                         except re.error, comment:
13525                                 print "\n!!! Regular expression error in \"%s\": %s" % ( mysearch, comment )
13526                                 sys.exit(1)
13527                         searchinstance.output()
13528
13529 def action_depclean(settings, trees, ldpath_mtimes,
13530         myopts, action, myfiles, spinner):
13531         # Kill packages that aren't explicitly merged or are required as a
13532         # dependency of another package. World file is explicit.
13533
13534         # Global depclean or prune operations are not very safe when there are
13535         # missing dependencies since it's unknown how badly incomplete
13536         # the dependency graph is, and we might accidentally remove packages
13537         # that should have been pulled into the graph. On the other hand, it's
13538         # relatively safe to ignore missing deps when only asked to remove
13539         # specific packages.
13540         allow_missing_deps = len(myfiles) > 0
13541
13542         msg = []
13543         msg.append("Always study the list of packages to be cleaned for any obvious\n")
13544         msg.append("mistakes. Packages that are part of the world set will always\n")
13545         msg.append("be kept.  They can be manually added to this set with\n")
13546         msg.append(good("`emerge --noreplace <atom>`") + ".  Packages that are listed in\n")
13547         msg.append("package.provided (see portage(5)) will be removed by\n")
13548         msg.append("depclean, even if they are part of the world set.\n")
13549         msg.append("\n")
13550         msg.append("As a safety measure, depclean will not remove any packages\n")
13551         msg.append("unless *all* required dependencies have been resolved.  As a\n")
13552         msg.append("consequence, it is often necessary to run %s\n" % \
13553                 good("`emerge --update"))
13554         msg.append(good("--newuse --deep @system @world`") + \
13555                 " prior to depclean.\n")
13556
13557         if action == "depclean" and "--quiet" not in myopts and not myfiles:
13558                 portage.writemsg_stdout("\n")
13559                 for x in msg:
13560                         portage.writemsg_stdout(colorize("WARN", " * ") + x)
13561
13562         xterm_titles = "notitles" not in settings.features
13563         myroot = settings["ROOT"]
13564         root_config = trees[myroot]["root_config"]
13565         getSetAtoms = root_config.setconfig.getSetAtoms
13566         vardb = trees[myroot]["vartree"].dbapi
13567
13568         required_set_names = ("system", "world")
13569         required_sets = {}
13570         set_args = []
13571
13572         for s in required_set_names:
13573                 required_sets[s] = InternalPackageSet(
13574                         initial_atoms=getSetAtoms(s))
13575
13576         
13577         # When removing packages, use a temporary version of world
13578         # which excludes packages that are intended to be eligible for
13579         # removal.
13580         world_temp_set = required_sets["world"]
13581         system_set = required_sets["system"]
13582
13583         if not system_set or not world_temp_set:
13584
13585                 if not system_set:
13586                         writemsg_level("!!! You have no system list.\n",
13587                                 level=logging.ERROR, noiselevel=-1)
13588
13589                 if not world_temp_set:
13590                         writemsg_level("!!! You have no world file.\n",
13591                                         level=logging.WARNING, noiselevel=-1)
13592
13593                 writemsg_level("!!! Proceeding is likely to " + \
13594                         "break your installation.\n",
13595                         level=logging.WARNING, noiselevel=-1)
13596                 if "--pretend" not in myopts:
13597                         countdown(int(settings["EMERGE_WARNING_DELAY"]), ">>> Depclean")
13598
13599         if action == "depclean":
13600                 emergelog(xterm_titles, " >>> depclean")
13601
13602         import textwrap
13603         args_set = InternalPackageSet()
13604         if myfiles:
13605                 for x in myfiles:
13606                         if not is_valid_package_atom(x):
13607                                 writemsg_level("!!! '%s' is not a valid package atom.\n" % x,
13608                                         level=logging.ERROR, noiselevel=-1)
13609                                 writemsg_level("!!! Please check ebuild(5) for full details.\n")
13610                                 return
13611                         try:
13612                                 atom = portage.dep_expand(x, mydb=vardb, settings=settings)
13613                         except portage.exception.AmbiguousPackageName, e:
13614                                 msg = "The short ebuild name \"" + x + \
13615                                         "\" is ambiguous.  Please specify " + \
13616                                         "one of the following " + \
13617                                         "fully-qualified ebuild names instead:"
13618                                 for line in textwrap.wrap(msg, 70):
13619                                         writemsg_level("!!! %s\n" % (line,),
13620                                                 level=logging.ERROR, noiselevel=-1)
13621                                 for i in e[0]:
13622                                         writemsg_level("    %s\n" % colorize("INFORM", i),
13623                                                 level=logging.ERROR, noiselevel=-1)
13624                                 writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
13625                                 return
13626                         args_set.add(atom)
13627                 matched_packages = False
13628                 for x in args_set:
13629                         if vardb.match(x):
13630                                 matched_packages = True
13631                                 break
13632                 if not matched_packages:
13633                         writemsg_level(">>> No packages selected for removal by %s\n" % \
13634                                 action)
13635                         return
13636
13637         writemsg_level("\nCalculating dependencies  ")
13638         resolver_params = create_depgraph_params(myopts, "remove")
13639         resolver = depgraph(settings, trees, myopts, resolver_params, spinner)
13640         vardb = resolver.trees[myroot]["vartree"].dbapi
13641
13642         if action == "depclean":
13643
13644                 if args_set:
13645                         # Pull in everything that's installed but not matched
13646                         # by an argument atom since we don't want to clean any
13647                         # package if something depends on it.
13648
13649                         world_temp_set.clear()
13650                         for pkg in vardb:
13651                                 spinner.update()
13652
13653                                 try:
13654                                         if args_set.findAtomForPackage(pkg) is None:
13655                                                 world_temp_set.add("=" + pkg.cpv)
13656                                                 continue
13657                                 except portage.exception.InvalidDependString, e:
13658                                         show_invalid_depstring_notice(pkg,
13659                                                 pkg.metadata["PROVIDE"], str(e))
13660                                         del e
13661                                         world_temp_set.add("=" + pkg.cpv)
13662                                         continue
13663
13664         elif action == "prune":
13665
13666                 # Pull in everything that's installed since we don't
13667                 # to prune a package if something depends on it.
13668                 world_temp_set.clear()
13669                 world_temp_set.update(vardb.cp_all())
13670
13671                 if not args_set:
13672
13673                         # Try to prune everything that's slotted.
13674                         for cp in vardb.cp_all():
13675                                 if len(vardb.cp_list(cp)) > 1:
13676                                         args_set.add(cp)
13677
13678                 # Remove atoms from world that match installed packages
13679                 # that are also matched by argument atoms, but do not remove
13680                 # them if they match the highest installed version.
13681                 for pkg in vardb:
13682                         spinner.update()
13683                         pkgs_for_cp = vardb.match_pkgs(pkg.cp)
13684                         if not pkgs_for_cp or pkg not in pkgs_for_cp:
13685                                 raise AssertionError("package expected in matches: " + \
13686                                         "cp = %s, cpv = %s matches = %s" % \
13687                                         (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
13688
13689                         highest_version = pkgs_for_cp[-1]
13690                         if pkg == highest_version:
13691                                 # pkg is the highest version
13692                                 world_temp_set.add("=" + pkg.cpv)
13693                                 continue
13694
13695                         if len(pkgs_for_cp) <= 1:
13696                                 raise AssertionError("more packages expected: " + \
13697                                         "cp = %s, cpv = %s matches = %s" % \
13698                                         (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
13699
13700                         try:
13701                                 if args_set.findAtomForPackage(pkg) is None:
13702                                         world_temp_set.add("=" + pkg.cpv)
13703                                         continue
13704                         except portage.exception.InvalidDependString, e:
13705                                 show_invalid_depstring_notice(pkg,
13706                                         pkg.metadata["PROVIDE"], str(e))
13707                                 del e
13708                                 world_temp_set.add("=" + pkg.cpv)
13709                                 continue
13710
13711         set_args = {}
13712         for s, package_set in required_sets.iteritems():
13713                 set_atom = SETPREFIX + s
13714                 set_arg = SetArg(arg=set_atom, set=package_set,
13715                         root_config=resolver.roots[myroot])
13716                 set_args[s] = set_arg
13717                 for atom in set_arg.set:
13718                         resolver._dep_stack.append(
13719                                 Dependency(atom=atom, root=myroot, parent=set_arg))
13720                         resolver.digraph.add(set_arg, None)
13721
13722         success = resolver._complete_graph()
13723         writemsg_level("\b\b... done!\n")
13724
13725         resolver.display_problems()
13726
13727         if not success:
13728                 return 1
13729
13730         def unresolved_deps():
13731
13732                 unresolvable = set()
13733                 for dep in resolver._initially_unsatisfied_deps:
13734                         if isinstance(dep.parent, Package) and \
13735                                 (dep.priority > UnmergeDepPriority.SOFT):
13736                                 unresolvable.add((dep.atom, dep.parent.cpv))
13737
13738                 if not unresolvable:
13739                         return False
13740
13741                 if unresolvable and not allow_missing_deps:
13742                         prefix = bad(" * ")
13743                         msg = []
13744                         msg.append("Dependencies could not be completely resolved due to")
13745                         msg.append("the following required packages not being installed:")
13746                         msg.append("")
13747                         for atom, parent in unresolvable:
13748                                 msg.append("  %s pulled in by:" % (atom,))
13749                                 msg.append("    %s" % (parent,))
13750                                 msg.append("")
13751                         msg.append("Have you forgotten to run " + \
13752                                 good("`emerge --update --newuse --deep @system @world`") + " prior")
13753                         msg.append(("to %s? It may be necessary to manually " + \
13754                                 "uninstall packages that no longer") % action)
13755                         msg.append("exist in the portage tree since " + \
13756                                 "it may not be possible to satisfy their")
13757                         msg.append("dependencies.  Also, be aware of " + \
13758                                 "the --with-bdeps option that is documented")
13759                         msg.append("in " + good("`man emerge`") + ".")
13760                         if action == "prune":
13761                                 msg.append("")
13762                                 msg.append("If you would like to ignore " + \
13763                                         "dependencies then use %s." % good("--nodeps"))
13764                         writemsg_level("".join("%s%s\n" % (prefix, line) for line in msg),
13765                                 level=logging.ERROR, noiselevel=-1)
13766                         return True
13767                 return False
13768
13769         if unresolved_deps():
13770                 return 1
13771
13772         graph = resolver.digraph.copy()
13773         required_pkgs_total = 0
13774         for node in graph:
13775                 if isinstance(node, Package):
13776                         required_pkgs_total += 1
13777
13778         def show_parents(child_node):
13779                 parent_nodes = graph.parent_nodes(child_node)
13780                 if not parent_nodes:
13781                         # With --prune, the highest version can be pulled in without any
13782                         # real parent since all installed packages are pulled in.  In that
13783                         # case there's nothing to show here.
13784                         return
13785                 parent_strs = []
13786                 for node in parent_nodes:
13787                         parent_strs.append(str(getattr(node, "cpv", node)))
13788                 parent_strs.sort()
13789                 msg = []
13790                 msg.append("  %s pulled in by:\n" % (child_node.cpv,))
13791                 for parent_str in parent_strs:
13792                         msg.append("    %s\n" % (parent_str,))
13793                 msg.append("\n")
13794                 portage.writemsg_stdout("".join(msg), noiselevel=-1)
13795
13796         def cmp_pkg_cpv(pkg1, pkg2):
13797                 """Sort Package instances by cpv."""
13798                 if pkg1.cpv > pkg2.cpv:
13799                         return 1
13800                 elif pkg1.cpv == pkg2.cpv:
13801                         return 0
13802                 else:
13803                         return -1
13804
13805         def create_cleanlist():
13806                 pkgs_to_remove = []
13807
13808                 if action == "depclean":
13809                         if args_set:
13810
13811                                 for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)):
13812                                         arg_atom = None
13813                                         try:
13814                                                 arg_atom = args_set.findAtomForPackage(pkg)
13815                                         except portage.exception.InvalidDependString:
13816                                                 # this error has already been displayed by now
13817                                                 continue
13818
13819                                         if arg_atom:
13820                                                 if pkg not in graph:
13821                                                         pkgs_to_remove.append(pkg)
13822                                                 elif "--verbose" in myopts:
13823                                                         show_parents(pkg)
13824
13825                         else:
13826                                 for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)):
13827                                         if pkg not in graph:
13828                                                 pkgs_to_remove.append(pkg)
13829                                         elif "--verbose" in myopts:
13830                                                 show_parents(pkg)
13831
13832                 elif action == "prune":
13833                         # Prune really uses all installed instead of world. It's not
13834                         # a real reverse dependency so don't display it as such.
13835                         graph.remove(set_args["world"])
13836
13837                         for atom in args_set:
13838                                 for pkg in vardb.match_pkgs(atom):
13839                                         if pkg not in graph:
13840                                                 pkgs_to_remove.append(pkg)
13841                                         elif "--verbose" in myopts:
13842                                                 show_parents(pkg)
13843
13844                 if not pkgs_to_remove:
13845                         writemsg_level(
13846                                 ">>> No packages selected for removal by %s\n" % action)
13847                         if "--verbose" not in myopts:
13848                                 writemsg_level(
13849                                         ">>> To see reverse dependencies, use %s\n" % \
13850                                                 good("--verbose"))
13851                         if action == "prune":
13852                                 writemsg_level(
13853                                         ">>> To ignore dependencies, use %s\n" % \
13854                                                 good("--nodeps"))
13855
13856                 return pkgs_to_remove
13857
13858         cleanlist = create_cleanlist()
13859
13860         if len(cleanlist):
13861                 clean_set = set(cleanlist)
13862
13863                 # Check if any of these package are the sole providers of libraries
13864                 # with consumers that have not been selected for removal. If so, these
13865                 # packages and any dependencies need to be added to the graph.
13866                 real_vardb = trees[myroot]["vartree"].dbapi
13867                 linkmap = real_vardb.linkmap
13868                 liblist = linkmap.listLibraryObjects()
13869                 consumer_cache = {}
13870                 provider_cache = {}
13871                 soname_cache = {}
13872                 consumer_map = {}
13873
13874                 writemsg_level(">>> Checking for lib consumers...\n")
13875
13876                 for pkg in cleanlist:
13877                         pkg_dblink = real_vardb._dblink(pkg.cpv)
13878                         provided_libs = set()
13879
13880                         for lib in liblist:
13881                                 if pkg_dblink.isowner(lib, myroot):
13882                                         provided_libs.add(lib)
13883
13884                         if not provided_libs:
13885                                 continue
13886
13887                         consumers = {}
13888                         for lib in provided_libs:
13889                                 lib_consumers = consumer_cache.get(lib)
13890                                 if lib_consumers is None:
13891                                         lib_consumers = linkmap.findConsumers(lib)
13892                                         consumer_cache[lib] = lib_consumers
13893                                 if lib_consumers:
13894                                         consumers[lib] = lib_consumers
13895
13896                         if not consumers:
13897                                 continue
13898
13899                         for lib, lib_consumers in consumers.items():
13900                                 for consumer_file in list(lib_consumers):
13901                                         if pkg_dblink.isowner(consumer_file, myroot):
13902                                                 lib_consumers.remove(consumer_file)
13903                                 if not lib_consumers:
13904                                         del consumers[lib]
13905
13906                         if not consumers:
13907                                 continue
13908
13909                         for lib, lib_consumers in consumers.iteritems():
13910
13911                                 soname = soname_cache.get(lib)
13912                                 if soname is None:
13913                                         soname = linkmap.getSoname(lib)
13914                                         soname_cache[lib] = soname
13915
13916                                 consumer_providers = []
13917                                 for lib_consumer in lib_consumers:
13918                                         providers = provider_cache.get(lib)
13919                                         if providers is None:
13920                                                 providers = linkmap.findProviders(lib_consumer)
13921                                                 provider_cache[lib_consumer] = providers
13922                                         if soname not in providers:
13923                                                 # Why does this happen?
13924                                                 continue
13925                                         consumer_providers.append(
13926                                                 (lib_consumer, providers[soname]))
13927
13928                                 consumers[lib] = consumer_providers
13929
13930                         consumer_map[pkg] = consumers
13931
13932                 if consumer_map:
13933
13934                         search_files = set()
13935                         for consumers in consumer_map.itervalues():
13936                                 for lib, consumer_providers in consumers.iteritems():
13937                                         for lib_consumer, providers in consumer_providers:
13938                                                 search_files.add(lib_consumer)
13939                                                 search_files.update(providers)
13940
13941                         writemsg_level(">>> Assigning files to packages...\n")
13942                         file_owners = real_vardb._owners.getFileOwnerMap(search_files)
13943
13944                         for pkg, consumers in consumer_map.items():
13945                                 for lib, consumer_providers in consumers.items():
13946                                         lib_consumers = set()
13947
13948                                         for lib_consumer, providers in consumer_providers:
13949                                                 owner_set = file_owners.get(lib_consumer)
13950                                                 provider_dblinks = set()
13951                                                 provider_pkgs = set()
13952
13953                                                 if len(providers) > 1:
13954                                                         for provider in providers:
13955                                                                 provider_set = file_owners.get(provider)
13956                                                                 if provider_set is not None:
13957                                                                         provider_dblinks.update(provider_set)
13958
13959                                                 if len(provider_dblinks) > 1:
13960                                                         for provider_dblink in provider_dblinks:
13961                                                                 pkg_key = ("installed", myroot,
13962                                                                         provider_dblink.mycpv, "nomerge")
13963                                                                 if pkg_key not in clean_set:
13964                                                                         provider_pkgs.add(vardb.get(pkg_key))
13965
13966                                                 if provider_pkgs:
13967                                                         continue
13968
13969                                                 if owner_set is not None:
13970                                                         lib_consumers.update(owner_set)
13971
13972                                         for consumer_dblink in list(lib_consumers):
13973                                                 if ("installed", myroot, consumer_dblink.mycpv,
13974                                                         "nomerge") in clean_set:
13975                                                         lib_consumers.remove(consumer_dblink)
13976                                                         continue
13977
13978                                         if lib_consumers:
13979                                                 consumers[lib] = lib_consumers
13980                                         else:
13981                                                 del consumers[lib]
13982                                 if not consumers:
13983                                         del consumer_map[pkg]
13984
13985                 if consumer_map:
13986                         # TODO: Implement a package set for rebuilding consumer packages.
13987
13988                         msg = "In order to avoid breakage of link level " + \
13989                                 "dependencies, one or more packages will not be removed. " + \
13990                                 "This can be solved by rebuilding " + \
13991                                 "the packages that pulled them in."
13992
13993                         prefix = bad(" * ")
13994                         from textwrap import wrap
13995                         writemsg_level("".join(prefix + "%s\n" % line for \
13996                                 line in wrap(msg, 70)), level=logging.WARNING, noiselevel=-1)
13997
13998                         msg = []
13999                         for pkg, consumers in consumer_map.iteritems():
14000                                 unique_consumers = set(chain(*consumers.values()))
14001                                 unique_consumers = sorted(consumer.mycpv \
14002                                         for consumer in unique_consumers)
14003                                 msg.append("")
14004                                 msg.append("  %s pulled in by:" % (pkg.cpv,))
14005                                 for consumer in unique_consumers:
14006                                         msg.append("    %s" % (consumer,))
14007                         msg.append("")
14008                         writemsg_level("".join(prefix + "%s\n" % line for line in msg),
14009                                 level=logging.WARNING, noiselevel=-1)
14010
14011                         # Add lib providers to the graph as children of lib consumers,
14012                         # and also add any dependencies pulled in by the provider.
14013                         writemsg_level(">>> Adding lib providers to graph...\n")
14014
14015                         for pkg, consumers in consumer_map.iteritems():
14016                                 for consumer_dblink in set(chain(*consumers.values())):
14017                                         consumer_pkg = vardb.get(("installed", myroot,
14018                                                 consumer_dblink.mycpv, "nomerge"))
14019                                         if not resolver._add_pkg(pkg,
14020                                                 Dependency(parent=consumer_pkg,
14021                                                 priority=UnmergeDepPriority(runtime=True),
14022                                                 root=pkg.root)):
14023                                                 resolver.display_problems()
14024                                                 return 1
14025
14026                         writemsg_level("\nCalculating dependencies  ")
14027                         success = resolver._complete_graph()
14028                         writemsg_level("\b\b... done!\n")
14029                         resolver.display_problems()
14030                         if not success:
14031                                 return 1
14032                         if unresolved_deps():
14033                                 return 1
14034
14035                         graph = resolver.digraph.copy()
14036                         required_pkgs_total = 0
14037                         for node in graph:
14038                                 if isinstance(node, Package):
14039                                         required_pkgs_total += 1
14040                         cleanlist = create_cleanlist()
14041                         if not cleanlist:
14042                                 return 0
14043                         clean_set = set(cleanlist)
14044
14045                 # Use a topological sort to create an unmerge order such that
14046                 # each package is unmerged before it's dependencies. This is
14047                 # necessary to avoid breaking things that may need to run
14048                 # during pkg_prerm or pkg_postrm phases.
14049
14050                 # Create a new graph to account for dependencies between the
14051                 # packages being unmerged.
14052                 graph = digraph()
14053                 del cleanlist[:]
14054
14055                 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
14056                 runtime = UnmergeDepPriority(runtime=True)
14057                 runtime_post = UnmergeDepPriority(runtime_post=True)
14058                 buildtime = UnmergeDepPriority(buildtime=True)
14059                 priority_map = {
14060                         "RDEPEND": runtime,
14061                         "PDEPEND": runtime_post,
14062                         "DEPEND": buildtime,
14063                 }
14064
14065                 for node in clean_set:
14066                         graph.add(node, None)
14067                         mydeps = []
14068                         node_use = node.metadata["USE"].split()
14069                         for dep_type in dep_keys:
14070                                 depstr = node.metadata[dep_type]
14071                                 if not depstr:
14072                                         continue
14073                                 try:
14074                                         portage.dep._dep_check_strict = False
14075                                         success, atoms = portage.dep_check(depstr, None, settings,
14076                                                 myuse=node_use, trees=resolver._graph_trees,
14077                                                 myroot=myroot)
14078                                 finally:
14079                                         portage.dep._dep_check_strict = True
14080                                 if not success:
14081                                         # Ignore invalid deps of packages that will
14082                                         # be uninstalled anyway.
14083                                         continue
14084
14085                                 priority = priority_map[dep_type]
14086                                 for atom in atoms:
14087                                         if not isinstance(atom, portage.dep.Atom):
14088                                                 # Ignore invalid atoms returned from dep_check().
14089                                                 continue
14090                                         if atom.blocker:
14091                                                 continue
14092                                         matches = vardb.match_pkgs(atom)
14093                                         if not matches:
14094                                                 continue
14095                                         for child_node in matches:
14096                                                 if child_node in clean_set:
14097                                                         graph.add(child_node, node, priority=priority)
14098
14099                 ordered = True
14100                 if len(graph.order) == len(graph.root_nodes()):
14101                         # If there are no dependencies between packages
14102                         # let unmerge() group them by cat/pn.
14103                         ordered = False
14104                         cleanlist = [pkg.cpv for pkg in graph.order]
14105                 else:
14106                         # Order nodes from lowest to highest overall reference count for
14107                         # optimal root node selection.
14108                         node_refcounts = {}
14109                         for node in graph.order:
14110                                 node_refcounts[node] = len(graph.parent_nodes(node))
14111                         def cmp_reference_count(node1, node2):
14112                                 return node_refcounts[node1] - node_refcounts[node2]
14113                         graph.order.sort(key=cmp_sort_key(cmp_reference_count))
14114         
14115                         ignore_priority_range = [None]
14116                         ignore_priority_range.extend(
14117                                 xrange(UnmergeDepPriority.MIN, UnmergeDepPriority.MAX + 1))
14118                         while not graph.empty():
14119                                 for ignore_priority in ignore_priority_range:
14120                                         nodes = graph.root_nodes(ignore_priority=ignore_priority)
14121                                         if nodes:
14122                                                 break
14123                                 if not nodes:
14124                                         raise AssertionError("no root nodes")
14125                                 if ignore_priority is not None:
14126                                         # Some deps have been dropped due to circular dependencies,
14127                                         # so only pop one node in order do minimize the number that
14128                                         # are dropped.
14129                                         del nodes[1:]
14130                                 for node in nodes:
14131                                         graph.remove(node)
14132                                         cleanlist.append(node.cpv)
14133
14134                 unmerge(root_config, myopts, "unmerge", cleanlist,
14135                         ldpath_mtimes, ordered=ordered)
14136
14137         if action == "prune":
14138                 return
14139
14140         if not cleanlist and "--quiet" in myopts:
14141                 return
14142
14143         print "Packages installed:   "+str(len(vardb.cpv_all()))
14144         print "Packages in world:    " + \
14145                 str(len(root_config.sets["world"].getAtoms()))
14146         print "Packages in system:   " + \
14147                 str(len(root_config.sets["system"].getAtoms()))
14148         print "Required packages:    "+str(required_pkgs_total)
14149         if "--pretend" in myopts:
14150                 print "Number to remove:     "+str(len(cleanlist))
14151         else:
14152                 print "Number removed:       "+str(len(cleanlist))
14153
14154 def resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
14155         """
14156         Construct a depgraph for the given resume list. This will raise
14157         PackageNotFound or depgraph.UnsatisfiedResumeDep when necessary.
14158         @rtype: tuple
14159         @returns: (success, depgraph, dropped_tasks)
14160         """
14161         skip_masked = True
14162         skip_unsatisfied = True
14163         mergelist = mtimedb["resume"]["mergelist"]
14164         dropped_tasks = set()
14165         while True:
14166                 mydepgraph = depgraph(settings, trees,
14167                         myopts, myparams, spinner)
14168                 try:
14169                         success = mydepgraph.loadResumeCommand(mtimedb["resume"],
14170                                 skip_masked=skip_masked)
14171                 except depgraph.UnsatisfiedResumeDep, e:
14172                         if not skip_unsatisfied:
14173                                 raise
14174
14175                         graph = mydepgraph.digraph
14176                         unsatisfied_parents = dict((dep.parent, dep.parent) \
14177                                 for dep in e.value)
14178                         traversed_nodes = set()
14179                         unsatisfied_stack = list(unsatisfied_parents)
14180                         while unsatisfied_stack:
14181                                 pkg = unsatisfied_stack.pop()
14182                                 if pkg in traversed_nodes:
14183                                         continue
14184                                 traversed_nodes.add(pkg)
14185
14186                                 # If this package was pulled in by a parent
14187                                 # package scheduled for merge, removing this
14188                                 # package may cause the the parent package's
14189                                 # dependency to become unsatisfied.
14190                                 for parent_node in graph.parent_nodes(pkg):
14191                                         if not isinstance(parent_node, Package) \
14192                                                 or parent_node.operation not in ("merge", "nomerge"):
14193                                                 continue
14194                                         unsatisfied = \
14195                                                 graph.child_nodes(parent_node,
14196                                                 ignore_priority=DepPrioritySatisfiedRange.ignore_soft)
14197                                         if pkg in unsatisfied:
14198                                                 unsatisfied_parents[parent_node] = parent_node
14199                                                 unsatisfied_stack.append(parent_node)
14200
14201                         pruned_mergelist = []
14202                         for x in mergelist:
14203                                 if isinstance(x, list) and \
14204                                         tuple(x) not in unsatisfied_parents:
14205                                         pruned_mergelist.append(x)
14206
14207                         # If the mergelist doesn't shrink then this loop is infinite.
14208                         if len(pruned_mergelist) == len(mergelist):
14209                                 # This happens if a package can't be dropped because
14210                                 # it's already installed, but it has unsatisfied PDEPEND.
14211                                 raise
14212                         mergelist[:] = pruned_mergelist
14213
14214                         # Exclude installed packages that have been removed from the graph due
14215                         # to failure to build/install runtime dependencies after the dependent
14216                         # package has already been installed.
14217                         dropped_tasks.update(pkg for pkg in \
14218                                 unsatisfied_parents if pkg.operation != "nomerge")
14219                         mydepgraph.break_refs(unsatisfied_parents)
14220
14221                         del e, graph, traversed_nodes, \
14222                                 unsatisfied_parents, unsatisfied_stack
14223                         continue
14224                 else:
14225                         break
14226         return (success, mydepgraph, dropped_tasks)
14227
14228 def action_build(settings, trees, mtimedb,
14229         myopts, myaction, myfiles, spinner):
14230
14231         # validate the state of the resume data
14232         # so that we can make assumptions later.
14233         for k in ("resume", "resume_backup"):
14234                 if k not in mtimedb:
14235                         continue
14236                 resume_data = mtimedb[k]
14237                 if not isinstance(resume_data, dict):
14238                         del mtimedb[k]
14239                         continue
14240                 mergelist = resume_data.get("mergelist")
14241                 if not isinstance(mergelist, list):
14242                         del mtimedb[k]
14243                         continue
14244                 for x in mergelist:
14245                         if not (isinstance(x, list) and len(x) == 4):
14246                                 continue
14247                         pkg_type, pkg_root, pkg_key, pkg_action = x
14248                         if pkg_root not in trees:
14249                                 # Current $ROOT setting differs,
14250                                 # so the list must be stale.
14251                                 mergelist = None
14252                                 break
14253                 if not mergelist:
14254                         del mtimedb[k]
14255                         continue
14256                 resume_opts = resume_data.get("myopts")
14257                 if not isinstance(resume_opts, (dict, list)):
14258                         del mtimedb[k]
14259                         continue
14260                 favorites = resume_data.get("favorites")
14261                 if not isinstance(favorites, list):
14262                         del mtimedb[k]
14263                         continue
14264
14265         resume = False
14266         if "--resume" in myopts and \
14267                 ("resume" in mtimedb or
14268                 "resume_backup" in mtimedb):
14269                 resume = True
14270                 if "resume" not in mtimedb:
14271                         mtimedb["resume"] = mtimedb["resume_backup"]
14272                         del mtimedb["resume_backup"]
14273                         mtimedb.commit()
14274                 # "myopts" is a list for backward compatibility.
14275                 resume_opts = mtimedb["resume"].get("myopts", [])
14276                 if isinstance(resume_opts, list):
14277                         resume_opts = dict((k,True) for k in resume_opts)
14278                 for opt in ("--ask", "--color", "--skipfirst", "--tree"):
14279                         resume_opts.pop(opt, None)
14280                 myopts.update(resume_opts)
14281
14282                 if "--debug" in myopts:
14283                         writemsg_level("myopts %s\n" % (myopts,))
14284
14285                 # Adjust config according to options of the command being resumed.
14286                 for myroot in trees:
14287                         mysettings =  trees[myroot]["vartree"].settings
14288                         mysettings.unlock()
14289                         adjust_config(myopts, mysettings)
14290                         mysettings.lock()
14291                         del myroot, mysettings
14292
14293         ldpath_mtimes = mtimedb["ldpath"]
14294         favorites=[]
14295         merge_count = 0
14296         buildpkgonly = "--buildpkgonly" in myopts
14297         pretend = "--pretend" in myopts
14298         fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
14299         ask = "--ask" in myopts
14300         nodeps = "--nodeps" in myopts
14301         oneshot = "--oneshot" in myopts or "--onlydeps" in myopts
14302         tree = "--tree" in myopts
14303         if nodeps and tree:
14304                 tree = False
14305                 del myopts["--tree"]
14306                 portage.writemsg(colorize("WARN", " * ") + \
14307                         "--tree is broken with --nodeps. Disabling...\n")
14308         debug = "--debug" in myopts
14309         verbose = "--verbose" in myopts
14310         quiet = "--quiet" in myopts
14311         if pretend or fetchonly:
14312                 # make the mtimedb readonly
14313                 mtimedb.filename = None
14314         if '--digest' in myopts or 'digest' in settings.features:
14315                 if '--digest' in myopts:
14316                         msg = "The --digest option"
14317                 else:
14318                         msg = "The FEATURES=digest setting"
14319
14320                 msg += " can prevent corruption from being" + \
14321                         " noticed. The `repoman manifest` command is the preferred" + \
14322                         " way to generate manifests and it is capable of doing an" + \
14323                         " entire repository or category at once."
14324                 prefix = bad(" * ")
14325                 writemsg(prefix + "\n")
14326                 from textwrap import wrap
14327                 for line in wrap(msg, 72):
14328                         writemsg("%s%s\n" % (prefix, line))
14329                 writemsg(prefix + "\n")
14330
14331         if "--quiet" not in myopts and \
14332                 ("--pretend" in myopts or "--ask" in myopts or \
14333                 "--tree" in myopts or "--verbose" in myopts):
14334                 action = ""
14335                 if "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
14336                         action = "fetched"
14337                 elif "--buildpkgonly" in myopts:
14338                         action = "built"
14339                 else:
14340                         action = "merged"
14341                 if "--tree" in myopts and action != "fetched": # Tree doesn't work with fetching
14342                         print
14343                         print darkgreen("These are the packages that would be %s, in reverse order:") % action
14344                         print
14345                 else:
14346                         print
14347                         print darkgreen("These are the packages that would be %s, in order:") % action
14348                         print
14349
14350         show_spinner = "--quiet" not in myopts and "--nodeps" not in myopts
14351         if not show_spinner:
14352                 spinner.update = spinner.update_quiet
14353
14354         if resume:
14355                 favorites = mtimedb["resume"].get("favorites")
14356                 if not isinstance(favorites, list):
14357                         favorites = []
14358
14359                 if show_spinner:
14360                         print "Calculating dependencies  ",
14361                 myparams = create_depgraph_params(myopts, myaction)
14362
14363                 resume_data = mtimedb["resume"]
14364                 mergelist = resume_data["mergelist"]
14365                 if mergelist and "--skipfirst" in myopts:
14366                         for i, task in enumerate(mergelist):
14367                                 if isinstance(task, list) and \
14368                                         task and task[-1] == "merge":
14369                                         del mergelist[i]
14370                                         break
14371
14372                 success = False
14373                 mydepgraph = None
14374                 try:
14375                         success, mydepgraph, dropped_tasks = resume_depgraph(
14376                                 settings, trees, mtimedb, myopts, myparams, spinner)
14377                 except (portage.exception.PackageNotFound,
14378                         depgraph.UnsatisfiedResumeDep), e:
14379                         if isinstance(e, depgraph.UnsatisfiedResumeDep):
14380                                 mydepgraph = e.depgraph
14381                         if show_spinner:
14382                                 print
14383                         from textwrap import wrap
14384                         from portage.output import EOutput
14385                         out = EOutput()
14386
14387                         resume_data = mtimedb["resume"]
14388                         mergelist = resume_data.get("mergelist")
14389                         if not isinstance(mergelist, list):
14390                                 mergelist = []
14391                         if mergelist and debug or (verbose and not quiet):
14392                                 out.eerror("Invalid resume list:")
14393                                 out.eerror("")
14394                                 indent = "  "
14395                                 for task in mergelist:
14396                                         if isinstance(task, list):
14397                                                 out.eerror(indent + str(tuple(task)))
14398                                 out.eerror("")
14399
14400                         if isinstance(e, depgraph.UnsatisfiedResumeDep):
14401                                 out.eerror("One or more packages are either masked or " + \
14402                                         "have missing dependencies:")
14403                                 out.eerror("")
14404                                 indent = "  "
14405                                 for dep in e.value:
14406                                         if dep.atom is None:
14407                                                 out.eerror(indent + "Masked package:")
14408                                                 out.eerror(2 * indent + str(dep.parent))
14409                                                 out.eerror("")
14410                                         else:
14411                                                 out.eerror(indent + str(dep.atom) + " pulled in by:")
14412                                                 out.eerror(2 * indent + str(dep.parent))
14413                                                 out.eerror("")
14414                                 msg = "The resume list contains packages " + \
14415                                         "that are either masked or have " + \
14416                                         "unsatisfied dependencies. " + \
14417                                         "Please restart/continue " + \
14418                                         "the operation manually, or use --skipfirst " + \
14419                                         "to skip the first package in the list and " + \
14420                                         "any other packages that may be " + \
14421                                         "masked or have missing dependencies."
14422                                 for line in wrap(msg, 72):
14423                                         out.eerror(line)
14424                         elif isinstance(e, portage.exception.PackageNotFound):
14425                                 out.eerror("An expected package is " + \
14426                                         "not available: %s" % str(e))
14427                                 out.eerror("")
14428                                 msg = "The resume list contains one or more " + \
14429                                         "packages that are no longer " + \
14430                                         "available. Please restart/continue " + \
14431                                         "the operation manually."
14432                                 for line in wrap(msg, 72):
14433                                         out.eerror(line)
14434                 else:
14435                         if show_spinner:
14436                                 print "\b\b... done!"
14437
14438                 if success:
14439                         if dropped_tasks:
14440                                 portage.writemsg("!!! One or more packages have been " + \
14441                                         "dropped due to\n" + \
14442                                         "!!! masking or unsatisfied dependencies:\n\n",
14443                                         noiselevel=-1)
14444                                 for task in dropped_tasks:
14445                                         portage.writemsg("  " + str(task) + "\n", noiselevel=-1)
14446                                 portage.writemsg("\n", noiselevel=-1)
14447                         del dropped_tasks
14448                 else:
14449                         if mydepgraph is not None:
14450                                 mydepgraph.display_problems()
14451                         if not (ask or pretend):
14452                                 # delete the current list and also the backup
14453                                 # since it's probably stale too.
14454                                 for k in ("resume", "resume_backup"):
14455                                         mtimedb.pop(k, None)
14456                                 mtimedb.commit()
14457
14458                         return 1
14459         else:
14460                 if ("--resume" in myopts):
14461                         print darkgreen("emerge: It seems we have nothing to resume...")
14462                         return os.EX_OK
14463
14464                 myparams = create_depgraph_params(myopts, myaction)
14465                 if "--quiet" not in myopts and "--nodeps" not in myopts:
14466                         print "Calculating dependencies  ",
14467                         sys.stdout.flush()
14468                 mydepgraph = depgraph(settings, trees, myopts, myparams, spinner)
14469                 try:
14470                         retval, favorites = mydepgraph.select_files(myfiles)
14471                 except portage.exception.PackageNotFound, e:
14472                         portage.writemsg("\n!!! %s\n" % str(e), noiselevel=-1)
14473                         return 1
14474                 except portage.exception.PackageSetNotFound, e:
14475                         root_config = trees[settings["ROOT"]]["root_config"]
14476                         display_missing_pkg_set(root_config, e.value)
14477                         return 1
14478                 if show_spinner:
14479                         print "\b\b... done!"
14480                 if not retval:
14481                         mydepgraph.display_problems()
14482                         return 1
14483
14484         if "--pretend" not in myopts and \
14485                 ("--ask" in myopts or "--tree" in myopts or \
14486                 "--verbose" in myopts) and \
14487                 not ("--quiet" in myopts and "--ask" not in myopts):
14488                 if "--resume" in myopts:
14489                         mymergelist = mydepgraph.altlist()
14490                         if len(mymergelist) == 0:
14491                                 print colorize("INFORM", "emerge: It seems we have nothing to resume...")
14492                                 return os.EX_OK
14493                         favorites = mtimedb["resume"]["favorites"]
14494                         retval = mydepgraph.display(
14495                                 mydepgraph.altlist(reversed=tree),
14496                                 favorites=favorites)
14497                         mydepgraph.display_problems()
14498                         if retval != os.EX_OK:
14499                                 return retval
14500                         prompt="Would you like to resume merging these packages?"
14501                 else:
14502                         retval = mydepgraph.display(
14503                                 mydepgraph.altlist(reversed=("--tree" in myopts)),
14504                                 favorites=favorites)
14505                         mydepgraph.display_problems()
14506                         if retval != os.EX_OK:
14507                                 return retval
14508                         mergecount=0
14509                         for x in mydepgraph.altlist():
14510                                 if isinstance(x, Package) and x.operation == "merge":
14511                                         mergecount += 1
14512
14513                         if mergecount==0:
14514                                 sets = trees[settings["ROOT"]]["root_config"].sets
14515                                 world_candidates = None
14516                                 if "--noreplace" in myopts and \
14517                                         not oneshot and favorites:
14518                                         # Sets that are not world candidates are filtered
14519                                         # out here since the favorites list needs to be
14520                                         # complete for depgraph.loadResumeCommand() to
14521                                         # operate correctly.
14522                                         world_candidates = [x for x in favorites \
14523                                                 if not (x.startswith(SETPREFIX) and \
14524                                                 not sets[x[1:]].world_candidate)]
14525                                 if "--noreplace" in myopts and \
14526                                         not oneshot and world_candidates:
14527                                         print
14528                                         for x in world_candidates:
14529                                                 print " %s %s" % (good("*"), x)
14530                                         prompt="Would you like to add these packages to your world favorites?"
14531                                 elif settings["AUTOCLEAN"] and "yes"==settings["AUTOCLEAN"]:
14532                                         prompt="Nothing to merge; would you like to auto-clean packages?"
14533                                 else:
14534                                         print
14535                                         print "Nothing to merge; quitting."
14536                                         print
14537                                         return os.EX_OK
14538                         elif "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
14539                                 prompt="Would you like to fetch the source files for these packages?"
14540                         else:
14541                                 prompt="Would you like to merge these packages?"
14542                 print
14543                 if "--ask" in myopts and userquery(prompt) == "No":
14544                         print
14545                         print "Quitting."
14546                         print
14547                         return os.EX_OK
14548                 # Don't ask again (e.g. when auto-cleaning packages after merge)
14549                 myopts.pop("--ask", None)
14550
14551         if ("--pretend" in myopts) and not ("--fetchonly" in myopts or "--fetch-all-uri" in myopts):
14552                 if ("--resume" in myopts):
14553                         mymergelist = mydepgraph.altlist()
14554                         if len(mymergelist) == 0:
14555                                 print colorize("INFORM", "emerge: It seems we have nothing to resume...")
14556                                 return os.EX_OK
14557                         favorites = mtimedb["resume"]["favorites"]
14558                         retval = mydepgraph.display(
14559                                 mydepgraph.altlist(reversed=tree),
14560                                 favorites=favorites)
14561                         mydepgraph.display_problems()
14562                         if retval != os.EX_OK:
14563                                 return retval
14564                 else:
14565                         retval = mydepgraph.display(
14566                                 mydepgraph.altlist(reversed=("--tree" in myopts)),
14567                                 favorites=favorites)
14568                         mydepgraph.display_problems()
14569                         if retval != os.EX_OK:
14570                                 return retval
14571                         if "--buildpkgonly" in myopts:
14572                                 graph_copy = mydepgraph.digraph.clone()
14573                                 removed_nodes = set()
14574                                 for node in graph_copy:
14575                                         if not isinstance(node, Package) or \
14576                                                 node.operation == "nomerge":
14577                                                 removed_nodes.add(node)
14578                                 graph_copy.difference_update(removed_nodes)
14579                                 if not graph_copy.hasallzeros(ignore_priority = \
14580                                         DepPrioritySatisfiedRange.ignore_medium):
14581                                         print "\n!!! --buildpkgonly requires all dependencies to be merged."
14582                                         print "!!! You have to merge the dependencies before you can build this package.\n"
14583                                         return 1
14584         else:
14585                 if "--buildpkgonly" in myopts:
14586                         graph_copy = mydepgraph.digraph.clone()
14587                         removed_nodes = set()
14588                         for node in graph_copy:
14589                                 if not isinstance(node, Package) or \
14590                                         node.operation == "nomerge":
14591                                         removed_nodes.add(node)
14592                         graph_copy.difference_update(removed_nodes)
14593                         if not graph_copy.hasallzeros(ignore_priority = \
14594                                 DepPrioritySatisfiedRange.ignore_medium):
14595                                 print "\n!!! --buildpkgonly requires all dependencies to be merged."
14596                                 print "!!! Cannot merge requested packages. Merge deps and try again.\n"
14597                                 return 1
14598
14599                 if ("--resume" in myopts):
14600                         favorites=mtimedb["resume"]["favorites"]
14601                         mymergelist = mydepgraph.altlist()
14602                         mydepgraph.break_refs(mymergelist)
14603                         mergetask = Scheduler(settings, trees, mtimedb, myopts,
14604                                 spinner, mymergelist, favorites, mydepgraph.schedulerGraph())
14605                         del mydepgraph, mymergelist
14606                         clear_caches(trees)
14607
14608                         retval = mergetask.merge()
14609                         merge_count = mergetask.curval
14610                 else:
14611                         if "resume" in mtimedb and \
14612                         "mergelist" in mtimedb["resume"] and \
14613                         len(mtimedb["resume"]["mergelist"]) > 1:
14614                                 mtimedb["resume_backup"] = mtimedb["resume"]
14615                                 del mtimedb["resume"]
14616                                 mtimedb.commit()
14617                         mtimedb["resume"]={}
14618                         # Stored as a dict starting with portage-2.1.6_rc1, and supported
14619                         # by >=portage-2.1.3_rc8. Versions <portage-2.1.3_rc8 only support
14620                         # a list type for options.
14621                         mtimedb["resume"]["myopts"] = myopts.copy()
14622
14623                         # Convert Atom instances to plain str.
14624                         mtimedb["resume"]["favorites"] = [str(x) for x in favorites]
14625
14626                         pkglist = mydepgraph.altlist()
14627                         mydepgraph.saveNomergeFavorites()
14628                         mydepgraph.break_refs(pkglist)
14629                         mergetask = Scheduler(settings, trees, mtimedb, myopts,
14630                                 spinner, pkglist, favorites, mydepgraph.schedulerGraph())
14631                         del mydepgraph, pkglist
14632                         clear_caches(trees)
14633
14634                         retval = mergetask.merge()
14635                         merge_count = mergetask.curval
14636
14637                 if retval == os.EX_OK and not (buildpkgonly or fetchonly or pretend):
14638                         if "yes" == settings.get("AUTOCLEAN"):
14639                                 portage.writemsg_stdout(">>> Auto-cleaning packages...\n")
14640                                 unmerge(trees[settings["ROOT"]]["root_config"],
14641                                         myopts, "clean", [],
14642                                         ldpath_mtimes, autoclean=1)
14643                         else:
14644                                 portage.writemsg_stdout(colorize("WARN", "WARNING:")
14645                                         + " AUTOCLEAN is disabled.  This can cause serious"
14646                                         + " problems due to overlapping packages.\n")
14647                         trees[settings["ROOT"]]["vartree"].dbapi.plib_registry.pruneNonExisting()
14648
14649                 return retval
14650
14651 def multiple_actions(action1, action2):
14652         sys.stderr.write("\n!!! Multiple actions requested... Please choose one only.\n")
14653         sys.stderr.write("!!! '%s' or '%s'\n\n" % (action1, action2))
14654         sys.exit(1)
14655
14656 def insert_optional_args(args):
14657         """
14658         Parse optional arguments and insert a value if one has
14659         not been provided. This is done before feeding the args
14660         to the optparse parser since that parser does not support
14661         this feature natively.
14662         """
14663
14664         new_args = []
14665         jobs_opts = ("-j", "--jobs")
14666         arg_stack = args[:]
14667         arg_stack.reverse()
14668         while arg_stack:
14669                 arg = arg_stack.pop()
14670
14671                 short_job_opt = bool("j" in arg and arg[:1] == "-" and arg[:2] != "--")
14672                 if not (short_job_opt or arg in jobs_opts):
14673                         new_args.append(arg)
14674                         continue
14675
14676                 # Insert an empty placeholder in order to
14677                 # satisfy the requirements of optparse.
14678
14679                 new_args.append("--jobs")
14680                 job_count = None
14681                 saved_opts = None
14682                 if short_job_opt and len(arg) > 2:
14683                         if arg[:2] == "-j":
14684                                 try:
14685                                         job_count = int(arg[2:])
14686                                 except ValueError:
14687                                         saved_opts = arg[2:]
14688                         else:
14689                                 job_count = "True"
14690                                 saved_opts = arg[1:].replace("j", "")
14691
14692                 if job_count is None and arg_stack:
14693                         try:
14694                                 job_count = int(arg_stack[-1])
14695                         except ValueError:
14696                                 pass
14697                         else:
14698                                 # Discard the job count from the stack
14699                                 # since we're consuming it here.
14700                                 arg_stack.pop()
14701
14702                 if job_count is None:
14703                         # unlimited number of jobs
14704                         new_args.append("True")
14705                 else:
14706                         new_args.append(str(job_count))
14707
14708                 if saved_opts is not None:
14709                         new_args.append("-" + saved_opts)
14710
14711         return new_args
14712
14713 def parse_opts(tmpcmdline, silent=False):
14714         myaction=None
14715         myopts = {}
14716         myfiles=[]
14717
14718         global actions, options, shortmapping
14719
14720         longopt_aliases = {"--cols":"--columns", "--skip-first":"--skipfirst"}
14721         argument_options = {
14722                 "--config-root": {
14723                         "help":"specify the location for portage configuration files",
14724                         "action":"store"
14725                 },
14726                 "--color": {
14727                         "help":"enable or disable color output",
14728                         "type":"choice",
14729                         "choices":("y", "n")
14730                 },
14731
14732                 "--jobs": {
14733
14734                         "help"   : "Specifies the number of packages to build " + \
14735                                 "simultaneously.",
14736
14737                         "action" : "store"
14738                 },
14739
14740                 "--load-average": {
14741
14742                         "help"   :"Specifies that no new builds should be started " + \
14743                                 "if there are other builds running and the load average " + \
14744                                 "is at least LOAD (a floating-point number).",
14745
14746                         "action" : "store"
14747                 },
14748
14749                 "--with-bdeps": {
14750                         "help":"include unnecessary build time dependencies",
14751                         "type":"choice",
14752                         "choices":("y", "n")
14753                 },
14754                 "--reinstall": {
14755                         "help":"specify conditions to trigger package reinstallation",
14756                         "type":"choice",
14757                         "choices":["changed-use"]
14758                 }
14759         }
14760
14761         from optparse import OptionParser
14762         parser = OptionParser()
14763         if parser.has_option("--help"):
14764                 parser.remove_option("--help")
14765
14766         for action_opt in actions:
14767                 parser.add_option("--" + action_opt, action="store_true",
14768                         dest=action_opt.replace("-", "_"), default=False)
14769         for myopt in options:
14770                 parser.add_option(myopt, action="store_true",
14771                         dest=myopt.lstrip("--").replace("-", "_"), default=False)
14772         for shortopt, longopt in shortmapping.iteritems():
14773                 parser.add_option("-" + shortopt, action="store_true",
14774                         dest=longopt.lstrip("--").replace("-", "_"), default=False)
14775         for myalias, myopt in longopt_aliases.iteritems():
14776                 parser.add_option(myalias, action="store_true",
14777                         dest=myopt.lstrip("--").replace("-", "_"), default=False)
14778
14779         for myopt, kwargs in argument_options.iteritems():
14780                 parser.add_option(myopt,
14781                         dest=myopt.lstrip("--").replace("-", "_"), **kwargs)
14782
14783         tmpcmdline = insert_optional_args(tmpcmdline)
14784
14785         myoptions, myargs = parser.parse_args(args=tmpcmdline)
14786
14787         if myoptions.jobs:
14788                 jobs = None
14789                 if myoptions.jobs == "True":
14790                         jobs = True
14791                 else:
14792                         try:
14793                                 jobs = int(myoptions.jobs)
14794                         except ValueError:
14795                                 jobs = -1
14796
14797                 if jobs is not True and \
14798                         jobs < 1:
14799                         jobs = None
14800                         if not silent:
14801                                 writemsg("!!! Invalid --jobs parameter: '%s'\n" % \
14802                                         (myoptions.jobs,), noiselevel=-1)
14803
14804                 myoptions.jobs = jobs
14805
14806         if myoptions.load_average:
14807                 try:
14808                         load_average = float(myoptions.load_average)
14809                 except ValueError:
14810                         load_average = 0.0
14811
14812                 if load_average <= 0.0:
14813                         load_average = None
14814                         if not silent:
14815                                 writemsg("!!! Invalid --load-average parameter: '%s'\n" % \
14816                                         (myoptions.load_average,), noiselevel=-1)
14817
14818                 myoptions.load_average = load_average
14819
14820         for myopt in options:
14821                 v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"))
14822                 if v:
14823                         myopts[myopt] = True
14824
14825         for myopt in argument_options:
14826                 v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"), None)
14827                 if v is not None:
14828                         myopts[myopt] = v
14829
14830         if myoptions.searchdesc:
14831                 myoptions.search = True
14832
14833         for action_opt in actions:
14834                 v = getattr(myoptions, action_opt.replace("-", "_"))
14835                 if v:
14836                         if myaction:
14837                                 multiple_actions(myaction, action_opt)
14838                                 sys.exit(1)
14839                         myaction = action_opt
14840
14841         myfiles += myargs
14842
14843         return myaction, myopts, myfiles
14844
14845 def validate_ebuild_environment(trees):
14846         for myroot in trees:
14847                 settings = trees[myroot]["vartree"].settings
14848                 settings.validate()
14849
14850 def clear_caches(trees):
14851         for d in trees.itervalues():
14852                 d["porttree"].dbapi.melt()
14853                 d["porttree"].dbapi._aux_cache.clear()
14854                 d["bintree"].dbapi._aux_cache.clear()
14855                 d["bintree"].dbapi._clear_cache()
14856                 d["vartree"].dbapi.linkmap._clear_cache()
14857         portage.dircache.clear()
14858         gc.collect()
14859
14860 def load_emerge_config(trees=None):
14861         kwargs = {}
14862         for k, envvar in (("config_root", "PORTAGE_CONFIGROOT"), ("target_root", "ROOT")):
14863                 v = os.environ.get(envvar, None)
14864                 if v and v.strip():
14865                         kwargs[k] = v
14866         trees = portage.create_trees(trees=trees, **kwargs)
14867
14868         for root, root_trees in trees.iteritems():
14869                 settings = root_trees["vartree"].settings
14870                 setconfig = load_default_config(settings, root_trees)
14871                 root_trees["root_config"] = RootConfig(settings, root_trees, setconfig)
14872
14873         settings = trees["/"]["vartree"].settings
14874
14875         for myroot in trees:
14876                 if myroot != "/":
14877                         settings = trees[myroot]["vartree"].settings
14878                         break
14879
14880         mtimedbfile = os.path.join("/", portage.CACHE_PATH.lstrip(os.path.sep), "mtimedb")
14881         mtimedb = portage.MtimeDB(mtimedbfile)
14882         
14883         return settings, trees, mtimedb
14884
14885 def adjust_config(myopts, settings):
14886         """Make emerge specific adjustments to the config."""
14887
14888         # To enhance usability, make some vars case insensitive by forcing them to
14889         # lower case.
14890         for myvar in ("AUTOCLEAN", "NOCOLOR"):
14891                 if myvar in settings:
14892                         settings[myvar] = settings[myvar].lower()
14893                         settings.backup_changes(myvar)
14894         del myvar
14895
14896         # Kill noauto as it will break merges otherwise.
14897         if "noauto" in settings.features:
14898                 while "noauto" in settings.features:
14899                         settings.features.remove("noauto")
14900                 settings["FEATURES"] = " ".join(settings.features)
14901                 settings.backup_changes("FEATURES")
14902
14903         CLEAN_DELAY = 5
14904         try:
14905                 CLEAN_DELAY = int(settings.get("CLEAN_DELAY", str(CLEAN_DELAY)))
14906         except ValueError, e:
14907                 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
14908                 portage.writemsg("!!! Unable to parse integer: CLEAN_DELAY='%s'\n" % \
14909                         settings["CLEAN_DELAY"], noiselevel=-1)
14910         settings["CLEAN_DELAY"] = str(CLEAN_DELAY)
14911         settings.backup_changes("CLEAN_DELAY")
14912
14913         EMERGE_WARNING_DELAY = 10
14914         try:
14915                 EMERGE_WARNING_DELAY = int(settings.get(
14916                         "EMERGE_WARNING_DELAY", str(EMERGE_WARNING_DELAY)))
14917         except ValueError, e:
14918                 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
14919                 portage.writemsg("!!! Unable to parse integer: EMERGE_WARNING_DELAY='%s'\n" % \
14920                         settings["EMERGE_WARNING_DELAY"], noiselevel=-1)
14921         settings["EMERGE_WARNING_DELAY"] = str(EMERGE_WARNING_DELAY)
14922         settings.backup_changes("EMERGE_WARNING_DELAY")
14923
14924         if "--quiet" in myopts:
14925                 settings["PORTAGE_QUIET"]="1"
14926                 settings.backup_changes("PORTAGE_QUIET")
14927
14928         if "--verbose" in myopts:
14929                 settings["PORTAGE_VERBOSE"] = "1"
14930                 settings.backup_changes("PORTAGE_VERBOSE")
14931
14932         # Set so that configs will be merged regardless of remembered status
14933         if ("--noconfmem" in myopts):
14934                 settings["NOCONFMEM"]="1"
14935                 settings.backup_changes("NOCONFMEM")
14936
14937         # Set various debug markers... They should be merged somehow.
14938         PORTAGE_DEBUG = 0
14939         try:
14940                 PORTAGE_DEBUG = int(settings.get("PORTAGE_DEBUG", str(PORTAGE_DEBUG)))
14941                 if PORTAGE_DEBUG not in (0, 1):
14942                         portage.writemsg("!!! Invalid value: PORTAGE_DEBUG='%i'\n" % \
14943                                 PORTAGE_DEBUG, noiselevel=-1)
14944                         portage.writemsg("!!! PORTAGE_DEBUG must be either 0 or 1\n",
14945                                 noiselevel=-1)
14946                         PORTAGE_DEBUG = 0
14947         except ValueError, e:
14948                 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
14949                 portage.writemsg("!!! Unable to parse integer: PORTAGE_DEBUG='%s'\n" %\
14950                         settings["PORTAGE_DEBUG"], noiselevel=-1)
14951                 del e
14952         if "--debug" in myopts:
14953                 PORTAGE_DEBUG = 1
14954         settings["PORTAGE_DEBUG"] = str(PORTAGE_DEBUG)
14955         settings.backup_changes("PORTAGE_DEBUG")
14956
14957         if settings.get("NOCOLOR") not in ("yes","true"):
14958                 portage.output.havecolor = 1
14959
14960         """The explicit --color < y | n > option overrides the NOCOLOR environment
14961         variable and stdout auto-detection."""
14962         if "--color" in myopts:
14963                 if "y" == myopts["--color"]:
14964                         portage.output.havecolor = 1
14965                         settings["NOCOLOR"] = "false"
14966                 else:
14967                         portage.output.havecolor = 0
14968                         settings["NOCOLOR"] = "true"
14969                 settings.backup_changes("NOCOLOR")
14970         elif not sys.stdout.isatty() and settings.get("NOCOLOR") != "no":
14971                 portage.output.havecolor = 0
14972                 settings["NOCOLOR"] = "true"
14973                 settings.backup_changes("NOCOLOR")
14974
14975 def apply_priorities(settings):
14976         ionice(settings)
14977         nice(settings)
14978
14979 def nice(settings):
14980         try:
14981                 os.nice(int(settings.get("PORTAGE_NICENESS", "0")))
14982         except (OSError, ValueError), e:
14983                 out = portage.output.EOutput()
14984                 out.eerror("Failed to change nice value to '%s'" % \
14985                         settings["PORTAGE_NICENESS"])
14986                 out.eerror("%s\n" % str(e))
14987
14988 def ionice(settings):
14989
14990         ionice_cmd = settings.get("PORTAGE_IONICE_COMMAND")
14991         if ionice_cmd:
14992                 ionice_cmd = shlex.split(ionice_cmd)
14993         if not ionice_cmd:
14994                 return
14995
14996         from portage.util import varexpand
14997         variables = {"PID" : str(os.getpid())}
14998         cmd = [varexpand(x, mydict=variables) for x in ionice_cmd]
14999
15000         try:
15001                 rval = portage.process.spawn(cmd, env=os.environ)
15002         except portage.exception.CommandNotFound:
15003                 # The OS kernel probably doesn't support ionice,
15004                 # so return silently.
15005                 return
15006
15007         if rval != os.EX_OK:
15008                 out = portage.output.EOutput()
15009                 out.eerror("PORTAGE_IONICE_COMMAND returned %d" % (rval,))
15010                 out.eerror("See the make.conf(5) man page for PORTAGE_IONICE_COMMAND usage instructions.")
15011
15012 def display_missing_pkg_set(root_config, set_name):
15013
15014         msg = []
15015         msg.append(("emerge: There are no sets to satisfy '%s'. " + \
15016                 "The following sets exist:") % \
15017                 colorize("INFORM", set_name))
15018         msg.append("")
15019
15020         for s in sorted(root_config.sets):
15021                 msg.append("    %s" % s)
15022         msg.append("")
15023
15024         writemsg_level("".join("%s\n" % l for l in msg),
15025                 level=logging.ERROR, noiselevel=-1)
15026
15027 def expand_set_arguments(myfiles, myaction, root_config):
15028         retval = os.EX_OK
15029         setconfig = root_config.setconfig
15030
15031         sets = setconfig.getSets()
15032
15033         # In order to know exactly which atoms/sets should be added to the
15034         # world file, the depgraph performs set expansion later. It will get
15035         # confused about where the atoms came from if it's not allowed to
15036         # expand them itself.
15037         do_not_expand = (None, )
15038         newargs = []
15039         for a in myfiles:
15040                 if a in ("system", "world"):
15041                         newargs.append(SETPREFIX+a)
15042                 else:
15043                         newargs.append(a)
15044         myfiles = newargs
15045         del newargs
15046         newargs = []
15047
15048         # separators for set arguments
15049         ARG_START = "{"
15050         ARG_END = "}"
15051
15052         # WARNING: all operators must be of equal length
15053         IS_OPERATOR = "/@"
15054         DIFF_OPERATOR = "-@"
15055         UNION_OPERATOR = "+@"
15056         
15057         for i in range(0, len(myfiles)):
15058                 if myfiles[i].startswith(SETPREFIX):
15059                         start = 0
15060                         end = 0
15061                         x = myfiles[i][len(SETPREFIX):]
15062                         newset = ""
15063                         while x:
15064                                 start = x.find(ARG_START)
15065                                 end = x.find(ARG_END)
15066                                 if start > 0 and start < end:
15067                                         namepart = x[:start]
15068                                         argpart = x[start+1:end]
15069                                 
15070                                         # TODO: implement proper quoting
15071                                         args = argpart.split(",")
15072                                         options = {}
15073                                         for a in args:
15074                                                 if "=" in a:
15075                                                         k, v  = a.split("=", 1)
15076                                                         options[k] = v
15077                                                 else:
15078                                                         options[a] = "True"
15079                                         setconfig.update(namepart, options)
15080                                         newset += (x[:start-len(namepart)]+namepart)
15081                                         x = x[end+len(ARG_END):]
15082                                 else:
15083                                         newset += x
15084                                         x = ""
15085                         myfiles[i] = SETPREFIX+newset
15086                                 
15087         sets = setconfig.getSets()
15088
15089         # display errors that occured while loading the SetConfig instance
15090         for e in setconfig.errors:
15091                 print colorize("BAD", "Error during set creation: %s" % e)
15092         
15093         # emerge relies on the existance of sets with names "world" and "system"
15094         required_sets = ("world", "system")
15095         missing_sets = []
15096
15097         for s in required_sets:
15098                 if s not in sets:
15099                         missing_sets.append(s)
15100         if missing_sets:
15101                 if len(missing_sets) > 2:
15102                         missing_sets_str = ", ".join('"%s"' % s for s in missing_sets[:-1])
15103                         missing_sets_str += ', and "%s"' % missing_sets[-1]
15104                 elif len(missing_sets) == 2:
15105                         missing_sets_str = '"%s" and "%s"' % tuple(missing_sets)
15106                 else:
15107                         missing_sets_str = '"%s"' % missing_sets[-1]
15108                 msg = ["emerge: incomplete set configuration, " + \
15109                         "missing set(s): %s" % missing_sets_str]
15110                 if sets:
15111                         msg.append("        sets defined: %s" % ", ".join(sets))
15112                 msg.append("        This usually means that '%s'" % \
15113                         (os.path.join(portage.const.GLOBAL_CONFIG_PATH, "sets.conf"),))
15114                 msg.append("        is missing or corrupt.")
15115                 for line in msg:
15116                         writemsg_level(line + "\n", level=logging.ERROR, noiselevel=-1)
15117                 return (None, 1)
15118         unmerge_actions = ("unmerge", "prune", "clean", "depclean")
15119
15120         for a in myfiles:
15121                 if a.startswith(SETPREFIX):
15122                         # support simple set operations (intersection, difference and union)
15123                         # on the commandline. Expressions are evaluated strictly left-to-right
15124                         if IS_OPERATOR in a or DIFF_OPERATOR in a or UNION_OPERATOR in a:
15125                                 expression = a[len(SETPREFIX):]
15126                                 expr_sets = []
15127                                 expr_ops = []
15128                                 while IS_OPERATOR in expression or DIFF_OPERATOR in expression or UNION_OPERATOR in expression:
15129                                         is_pos = expression.rfind(IS_OPERATOR)
15130                                         diff_pos = expression.rfind(DIFF_OPERATOR)
15131                                         union_pos = expression.rfind(UNION_OPERATOR)
15132                                         op_pos = max(is_pos, diff_pos, union_pos)
15133                                         s1 = expression[:op_pos]
15134                                         s2 = expression[op_pos+len(IS_OPERATOR):]
15135                                         op = expression[op_pos:op_pos+len(IS_OPERATOR)]
15136                                         if not s2 in sets:
15137                                                 display_missing_pkg_set(root_config, s2)
15138                                                 return (None, 1)
15139                                         expr_sets.insert(0, s2)
15140                                         expr_ops.insert(0, op)
15141                                         expression = s1
15142                                 if not expression in sets:
15143                                         display_missing_pkg_set(root_config, expression)
15144                                         return (None, 1)
15145                                 expr_sets.insert(0, expression)
15146                                 result = set(setconfig.getSetAtoms(expression))
15147                                 for i in range(0, len(expr_ops)):
15148                                         s2 = setconfig.getSetAtoms(expr_sets[i+1])
15149                                         if expr_ops[i] == IS_OPERATOR:
15150                                                 result.intersection_update(s2)
15151                                         elif expr_ops[i] == DIFF_OPERATOR:
15152                                                 result.difference_update(s2)
15153                                         elif expr_ops[i] == UNION_OPERATOR:
15154                                                 result.update(s2)
15155                                         else:
15156                                                 raise NotImplementedError("unknown set operator %s" % expr_ops[i])
15157                                 newargs.extend(result)
15158                         else:                   
15159                                 s = a[len(SETPREFIX):]
15160                                 if s not in sets:
15161                                         display_missing_pkg_set(root_config, s)
15162                                         return (None, 1)
15163                                 setconfig.active.append(s)
15164                                 try:
15165                                         set_atoms = setconfig.getSetAtoms(s)
15166                                 except portage.exception.PackageSetNotFound, e:
15167                                         writemsg_level(("emerge: the given set '%s' " + \
15168                                                 "contains a non-existent set named '%s'.\n") % \
15169                                                 (s, e), level=logging.ERROR, noiselevel=-1)
15170                                         return (None, 1)
15171                                 if myaction in unmerge_actions and \
15172                                                 not sets[s].supportsOperation("unmerge"):
15173                                         sys.stderr.write("emerge: the given set '%s' does " % s + \
15174                                                 "not support unmerge operations\n")
15175                                         retval = 1
15176                                 elif not set_atoms:
15177                                         print "emerge: '%s' is an empty set" % s
15178                                 elif myaction not in do_not_expand:
15179                                         newargs.extend(set_atoms)
15180                                 else:
15181                                         newargs.append(SETPREFIX+s)
15182                                 for e in sets[s].errors:
15183                                         print e
15184                 else:
15185                         newargs.append(a)
15186         return (newargs, retval)
15187
15188 def repo_name_check(trees):
15189         missing_repo_names = set()
15190         for root, root_trees in trees.iteritems():
15191                 if "porttree" in root_trees:
15192                         portdb = root_trees["porttree"].dbapi
15193                         missing_repo_names.update(portdb.porttrees)
15194                         repos = portdb.getRepositories()
15195                         for r in repos:
15196                                 missing_repo_names.discard(portdb.getRepositoryPath(r))
15197                         if portdb.porttree_root in missing_repo_names and \
15198                                 not os.path.exists(os.path.join(
15199                                 portdb.porttree_root, "profiles")):
15200                                 # This is normal if $PORTDIR happens to be empty,
15201                                 # so don't warn about it.
15202                                 missing_repo_names.remove(portdb.porttree_root)
15203
15204         if missing_repo_names:
15205                 msg = []
15206                 msg.append("WARNING: One or more repositories " + \
15207                         "have missing repo_name entries:")
15208                 msg.append("")
15209                 for p in missing_repo_names:
15210                         msg.append("\t%s/profiles/repo_name" % (p,))
15211                 msg.append("")
15212                 msg.extend(textwrap.wrap("NOTE: Each repo_name entry " + \
15213                         "should be a plain text file containing a unique " + \
15214                         "name for the repository on the first line.", 70))
15215                 writemsg_level("".join("%s\n" % l for l in msg),
15216                         level=logging.WARNING, noiselevel=-1)
15217
15218         return bool(missing_repo_names)
15219
15220 def config_protect_check(trees):
15221         for root, root_trees in trees.iteritems():
15222                 if not root_trees["root_config"].settings.get("CONFIG_PROTECT"):
15223                         msg = "!!! CONFIG_PROTECT is empty"
15224                         if root != "/":
15225                                 msg += " for '%s'" % root
15226                         writemsg_level(msg, level=logging.WARN, noiselevel=-1)
15227
15228 def ambiguous_package_name(arg, atoms, root_config, spinner, myopts):
15229
15230         if "--quiet" in myopts:
15231                 print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
15232                 print "!!! one of the following fully-qualified ebuild names instead:\n"
15233                 for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
15234                         print "    " + colorize("INFORM", cp)
15235                 return
15236
15237         s = search(root_config, spinner, "--searchdesc" in myopts,
15238                 "--quiet" not in myopts, "--usepkg" in myopts,
15239                 "--usepkgonly" in myopts)
15240         null_cp = portage.dep_getkey(insert_category_into_atom(
15241                 arg, "null"))
15242         cat, atom_pn = portage.catsplit(null_cp)
15243         s.searchkey = atom_pn
15244         for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
15245                 s.addCP(cp)
15246         s.output()
15247         print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
15248         print "!!! one of the above fully-qualified ebuild names instead.\n"
15249
15250 def profile_check(trees, myaction, myopts):
15251         if myaction in ("info", "sync"):
15252                 return os.EX_OK
15253         elif "--version" in myopts or "--help" in myopts:
15254                 return os.EX_OK
15255         for root, root_trees in trees.iteritems():
15256                 if root_trees["root_config"].settings.profiles:
15257                         continue
15258                 # generate some profile related warning messages
15259                 validate_ebuild_environment(trees)
15260                 msg = "If you have just changed your profile configuration, you " + \
15261                         "should revert back to the previous configuration. Due to " + \
15262                         "your current profile being invalid, allowed actions are " + \
15263                         "limited to --help, --info, --sync, and --version."
15264                 writemsg_level("".join("!!! %s\n" % l for l in textwrap.wrap(msg, 70)),
15265                         level=logging.ERROR, noiselevel=-1)
15266                 return 1
15267         return os.EX_OK
15268
15269 def emerge_main():
15270         global portage  # NFC why this is necessary now - genone
15271         portage._disable_legacy_globals()
15272         # Disable color until we're sure that it should be enabled (after
15273         # EMERGE_DEFAULT_OPTS has been parsed).
15274         portage.output.havecolor = 0
15275         # This first pass is just for options that need to be known as early as
15276         # possible, such as --config-root.  They will be parsed again later,
15277         # together with EMERGE_DEFAULT_OPTS (which may vary depending on the
15278         # the value of --config-root).
15279         myaction, myopts, myfiles = parse_opts(sys.argv[1:], silent=True)
15280         if "--debug" in myopts:
15281                 os.environ["PORTAGE_DEBUG"] = "1"
15282         if "--config-root" in myopts:
15283                 os.environ["PORTAGE_CONFIGROOT"] = myopts["--config-root"]
15284
15285         # Portage needs to ensure a sane umask for the files it creates.
15286         os.umask(022)
15287         settings, trees, mtimedb = load_emerge_config()
15288         portdb = trees[settings["ROOT"]]["porttree"].dbapi
15289         rval = profile_check(trees, myaction, myopts)
15290         if rval != os.EX_OK:
15291                 return rval
15292
15293         if portage._global_updates(trees, mtimedb["updates"]):
15294                 mtimedb.commit()
15295                 # Reload the whole config from scratch.
15296                 settings, trees, mtimedb = load_emerge_config(trees=trees)
15297                 portdb = trees[settings["ROOT"]]["porttree"].dbapi
15298
15299         xterm_titles = "notitles" not in settings.features
15300
15301         tmpcmdline = []
15302         if "--ignore-default-opts" not in myopts:
15303                 tmpcmdline.extend(settings["EMERGE_DEFAULT_OPTS"].split())
15304         tmpcmdline.extend(sys.argv[1:])
15305         myaction, myopts, myfiles = parse_opts(tmpcmdline)
15306
15307         if "--digest" in myopts:
15308                 os.environ["FEATURES"] = os.environ.get("FEATURES","") + " digest"
15309                 # Reload the whole config from scratch so that the portdbapi internal
15310                 # config is updated with new FEATURES.
15311                 settings, trees, mtimedb = load_emerge_config(trees=trees)
15312                 portdb = trees[settings["ROOT"]]["porttree"].dbapi
15313
15314         for myroot in trees:
15315                 mysettings =  trees[myroot]["vartree"].settings
15316                 mysettings.unlock()
15317                 adjust_config(myopts, mysettings)
15318                 if '--pretend' not in myopts and myaction in \
15319                         (None, 'clean', 'depclean', 'prune', 'unmerge'):
15320                         mysettings["PORTAGE_COUNTER_HASH"] = \
15321                                 trees[myroot]["vartree"].dbapi._counter_hash()
15322                         mysettings.backup_changes("PORTAGE_COUNTER_HASH")
15323                 mysettings.lock()
15324                 del myroot, mysettings
15325
15326         apply_priorities(settings)
15327
15328         spinner = stdout_spinner()
15329         if "candy" in settings.features:
15330                 spinner.update = spinner.update_scroll
15331
15332         if "--quiet" not in myopts:
15333                 portage.deprecated_profile_check(settings=settings)
15334                 repo_name_check(trees)
15335                 config_protect_check(trees)
15336
15337         eclasses_overridden = {}
15338         for mytrees in trees.itervalues():
15339                 mydb = mytrees["porttree"].dbapi
15340                 # Freeze the portdbapi for performance (memoize all xmatch results).
15341                 mydb.freeze()
15342                 eclasses_overridden.update(mydb.eclassdb._master_eclasses_overridden)
15343         del mytrees, mydb
15344
15345         if eclasses_overridden and \
15346                 settings.get("PORTAGE_ECLASS_WARNING_ENABLE") != "0":
15347                 prefix = bad(" * ")
15348                 if len(eclasses_overridden) == 1:
15349                         writemsg(prefix + "Overlay eclass overrides " + \
15350                                 "eclass from PORTDIR:\n", noiselevel=-1)
15351                 else:
15352                         writemsg(prefix + "Overlay eclasses override " + \
15353                                 "eclasses from PORTDIR:\n", noiselevel=-1)
15354                 writemsg(prefix + "\n", noiselevel=-1)
15355                 for eclass_name in sorted(eclasses_overridden):
15356                         writemsg(prefix + "  '%s/%s.eclass'\n" % \
15357                                 (eclasses_overridden[eclass_name], eclass_name),
15358                                 noiselevel=-1)
15359                 writemsg(prefix + "\n", noiselevel=-1)
15360                 msg = "It is best to avoid overriding eclasses from PORTDIR " + \
15361                 "because it will trigger invalidation of cached ebuild metadata " + \
15362                 "that is distributed with the portage tree. If you must " + \
15363                 "override eclasses from PORTDIR then you are advised to add " + \
15364                 "FEATURES=\"metadata-transfer\" to /etc/make.conf and to run " + \
15365                 "`emerge --regen` after each time that you run `emerge --sync`. " + \
15366                 "Set PORTAGE_ECLASS_WARNING_ENABLE=\"0\" in /etc/make.conf if " + \
15367                 "you would like to disable this warning."
15368                 from textwrap import wrap
15369                 for line in wrap(msg, 72):
15370                         writemsg("%s%s\n" % (prefix, line), noiselevel=-1)
15371
15372         if "moo" in myfiles:
15373                 print """
15374
15375   Larry loves Gentoo (""" + platform.system() + """)
15376
15377  _______________________
15378 < Have you mooed today? >
15379  -----------------------
15380         \   ^__^
15381          \  (oo)\_______
15382             (__)\       )\/\ 
15383                 ||----w |
15384                 ||     ||
15385
15386 """
15387
15388         for x in myfiles:
15389                 ext = os.path.splitext(x)[1]
15390                 if (ext == ".ebuild" or ext == ".tbz2") and os.path.exists(os.path.abspath(x)):
15391                         print colorize("BAD", "\n*** emerging by path is broken and may not always work!!!\n")
15392                         break
15393
15394         root_config = trees[settings["ROOT"]]["root_config"]
15395         if myaction == "list-sets":
15396                 sys.stdout.write("".join("%s\n" % s for s in sorted(root_config.sets)))
15397                 sys.stdout.flush()
15398                 return os.EX_OK
15399
15400         # only expand sets for actions taking package arguments
15401         oldargs = myfiles[:]
15402         if myaction in ("clean", "config", "depclean", "info", "prune", "unmerge", None):
15403                 myfiles, retval = expand_set_arguments(myfiles, myaction, root_config)
15404                 if retval != os.EX_OK:
15405                         return retval
15406
15407                 # Need to handle empty sets specially, otherwise emerge will react 
15408                 # with the help message for empty argument lists
15409                 if oldargs and not myfiles:
15410                         print "emerge: no targets left after set expansion"
15411                         return 0
15412
15413         if ("--tree" in myopts) and ("--columns" in myopts):
15414                 print "emerge: can't specify both of \"--tree\" and \"--columns\"."
15415                 return 1
15416
15417         if ("--quiet" in myopts):
15418                 spinner.update = spinner.update_quiet
15419                 portage.util.noiselimit = -1
15420
15421         # Always create packages if FEATURES=buildpkg
15422         # Imply --buildpkg if --buildpkgonly
15423         if ("buildpkg" in settings.features) or ("--buildpkgonly" in myopts):
15424                 if "--buildpkg" not in myopts:
15425                         myopts["--buildpkg"] = True
15426
15427         # Always try and fetch binary packages if FEATURES=getbinpkg
15428         if ("getbinpkg" in settings.features):
15429                 myopts["--getbinpkg"] = True
15430
15431         if "--buildpkgonly" in myopts:
15432                 # --buildpkgonly will not merge anything, so
15433                 # it cancels all binary package options.
15434                 for opt in ("--getbinpkg", "--getbinpkgonly",
15435                         "--usepkg", "--usepkgonly"):
15436                         myopts.pop(opt, None)
15437
15438         if "--fetch-all-uri" in myopts:
15439                 myopts["--fetchonly"] = True
15440
15441         if "--skipfirst" in myopts and "--resume" not in myopts:
15442                 myopts["--resume"] = True
15443
15444         if ("--getbinpkgonly" in myopts) and not ("--usepkgonly" in myopts):
15445                 myopts["--usepkgonly"] = True
15446
15447         if ("--getbinpkgonly" in myopts) and not ("--getbinpkg" in myopts):
15448                 myopts["--getbinpkg"] = True
15449
15450         if ("--getbinpkg" in myopts) and not ("--usepkg" in myopts):
15451                 myopts["--usepkg"] = True
15452
15453         # Also allow -K to apply --usepkg/-k
15454         if ("--usepkgonly" in myopts) and not ("--usepkg" in myopts):
15455                 myopts["--usepkg"] = True
15456
15457         # Allow -p to remove --ask
15458         if ("--pretend" in myopts) and ("--ask" in myopts):
15459                 print ">>> --pretend disables --ask... removing --ask from options."
15460                 del myopts["--ask"]
15461
15462         # forbid --ask when not in a terminal
15463         # note: this breaks `emerge --ask | tee logfile`, but that doesn't work anyway.
15464         if ("--ask" in myopts) and (not sys.stdin.isatty()):
15465                 portage.writemsg("!!! \"--ask\" should only be used in a terminal. Exiting.\n",
15466                         noiselevel=-1)
15467                 return 1
15468
15469         if settings.get("PORTAGE_DEBUG", "") == "1":
15470                 spinner.update = spinner.update_quiet
15471                 portage.debug=1
15472                 if "python-trace" in settings.features:
15473                         import portage.debug
15474                         portage.debug.set_trace(True)
15475
15476         if not ("--quiet" in myopts):
15477                 if not sys.stdout.isatty() or ("--nospinner" in myopts):
15478                         spinner.update = spinner.update_basic
15479
15480         if myaction == 'version':
15481                 print getportageversion(settings["PORTDIR"], settings["ROOT"],
15482                         settings.profile_path, settings["CHOST"],
15483                         trees[settings["ROOT"]]["vartree"].dbapi)
15484                 return 0
15485         elif "--help" in myopts:
15486                 _emerge.help.help(myaction, myopts, portage.output.havecolor)
15487                 return 0
15488
15489         if "--debug" in myopts:
15490                 print "myaction", myaction
15491                 print "myopts", myopts
15492
15493         if not myaction and not myfiles and "--resume" not in myopts:
15494                 _emerge.help.help(myaction, myopts, portage.output.havecolor)
15495                 return 1
15496
15497         pretend = "--pretend" in myopts
15498         fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
15499         buildpkgonly = "--buildpkgonly" in myopts
15500
15501         # check if root user is the current user for the actions where emerge needs this
15502         if portage.secpass < 2:
15503                 # We've already allowed "--version" and "--help" above.
15504                 if "--pretend" not in myopts and myaction not in ("search","info"):
15505                         need_superuser = not \
15506                                 (fetchonly or \
15507                                 (buildpkgonly and secpass >= 1) or \
15508                                 myaction in ("metadata", "regen") or \
15509                                 (myaction == "sync" and os.access(settings["PORTDIR"], os.W_OK)))
15510                         if portage.secpass < 1 or \
15511                                 need_superuser:
15512                                 if need_superuser:
15513                                         access_desc = "superuser"
15514                                 else:
15515                                         access_desc = "portage group"
15516                                 # Always show portage_group_warning() when only portage group
15517                                 # access is required but the user is not in the portage group.
15518                                 from portage.data import portage_group_warning
15519                                 if "--ask" in myopts:
15520                                         myopts["--pretend"] = True
15521                                         del myopts["--ask"]
15522                                         print ("%s access is required... " + \
15523                                                 "adding --pretend to options.\n") % access_desc
15524                                         if portage.secpass < 1 and not need_superuser:
15525                                                 portage_group_warning()
15526                                 else:
15527                                         sys.stderr.write(("emerge: %s access is " + \
15528                                                 "required.\n\n") % access_desc)
15529                                         if portage.secpass < 1 and not need_superuser:
15530                                                 portage_group_warning()
15531                                         return 1
15532
15533         disable_emergelog = False
15534         for x in ("--pretend", "--fetchonly", "--fetch-all-uri"):
15535                 if x in myopts:
15536                         disable_emergelog = True
15537                         break
15538         if myaction in ("search", "info"):
15539                 disable_emergelog = True
15540         if disable_emergelog:
15541                 """ Disable emergelog for everything except build or unmerge
15542                 operations.  This helps minimize parallel emerge.log entries that can
15543                 confuse log parsers.  We especially want it disabled during
15544                 parallel-fetch, which uses --resume --fetchonly."""
15545                 global emergelog
15546                 def emergelog(*pargs, **kargs):
15547                         pass
15548
15549         if not "--pretend" in myopts:
15550                 emergelog(xterm_titles, "Started emerge on: "+\
15551                         time.strftime("%b %d, %Y %H:%M:%S", time.localtime()))
15552                 myelogstr=""
15553                 if myopts:
15554                         myelogstr=" ".join(myopts)
15555                 if myaction:
15556                         myelogstr+=" "+myaction
15557                 if myfiles:
15558                         myelogstr += " " + " ".join(oldargs)
15559                 emergelog(xterm_titles, " *** emerge " + myelogstr)
15560         del oldargs
15561
15562         def emergeexitsig(signum, frame):
15563                 signal.signal(signal.SIGINT, signal.SIG_IGN)
15564                 signal.signal(signal.SIGTERM, signal.SIG_IGN)
15565                 portage.util.writemsg("\n\nExiting on signal %(signal)s\n" % {"signal":signum})
15566                 sys.exit(100+signum)
15567         signal.signal(signal.SIGINT, emergeexitsig)
15568         signal.signal(signal.SIGTERM, emergeexitsig)
15569
15570         def emergeexit():
15571                 """This gets out final log message in before we quit."""
15572                 if "--pretend" not in myopts:
15573                         emergelog(xterm_titles, " *** terminating.")
15574                 if "notitles" not in settings.features:
15575                         xtermTitleReset()
15576         portage.atexit_register(emergeexit)
15577
15578         if myaction in ("config", "metadata", "regen", "sync"):
15579                 if "--pretend" in myopts:
15580                         sys.stderr.write(("emerge: The '%s' action does " + \
15581                                 "not support '--pretend'.\n") % myaction)
15582                         return 1
15583
15584         if "sync" == myaction:
15585                 return action_sync(settings, trees, mtimedb, myopts, myaction)
15586         elif "metadata" == myaction:
15587                 action_metadata(settings, portdb, myopts)
15588         elif myaction=="regen":
15589                 validate_ebuild_environment(trees)
15590                 return action_regen(settings, portdb, myopts.get("--jobs"),
15591                         myopts.get("--load-average"))
15592         # HELP action
15593         elif "config"==myaction:
15594                 validate_ebuild_environment(trees)
15595                 action_config(settings, trees, myopts, myfiles)
15596
15597         # SEARCH action
15598         elif "search"==myaction:
15599                 validate_ebuild_environment(trees)
15600                 action_search(trees[settings["ROOT"]]["root_config"],
15601                         myopts, myfiles, spinner)
15602         elif myaction in ("clean", "unmerge") or \
15603                 (myaction == "prune" and "--nodeps" in myopts):
15604                 validate_ebuild_environment(trees)
15605
15606                 # Ensure atoms are valid before calling unmerge().
15607                 # For backward compat, leading '=' is not required.
15608                 for x in myfiles:
15609                         if is_valid_package_atom(x) or \
15610                                 is_valid_package_atom("=" + x):
15611                                 continue
15612                         msg = []
15613                         msg.append("'%s' is not a valid package atom." % (x,))
15614                         msg.append("Please check ebuild(5) for full details.")
15615                         writemsg_level("".join("!!! %s\n" % line for line in msg),
15616                                 level=logging.ERROR, noiselevel=-1)
15617                         return 1
15618
15619                 # When given a list of atoms, unmerge
15620                 # them in the order given.
15621                 ordered = myaction == "unmerge"
15622                 if 1 == unmerge(root_config, myopts, myaction, myfiles,
15623                         mtimedb["ldpath"], ordered=ordered):
15624                         if not (buildpkgonly or fetchonly or pretend):
15625                                 post_emerge(root_config, myopts, mtimedb, os.EX_OK)
15626
15627         elif myaction in ("depclean", "info", "prune"):
15628
15629                 # Ensure atoms are valid before calling unmerge().
15630                 vardb = trees[settings["ROOT"]]["vartree"].dbapi
15631                 valid_atoms = []
15632                 for x in myfiles:
15633                         if is_valid_package_atom(x):
15634                                 try:
15635                                         valid_atoms.append(
15636                                                 portage.dep_expand(x, mydb=vardb, settings=settings))
15637                                 except portage.exception.AmbiguousPackageName, e:
15638                                         msg = "The short ebuild name \"" + x + \
15639                                                 "\" is ambiguous.  Please specify " + \
15640                                                 "one of the following " + \
15641                                                 "fully-qualified ebuild names instead:"
15642                                         for line in textwrap.wrap(msg, 70):
15643                                                 writemsg_level("!!! %s\n" % (line,),
15644                                                         level=logging.ERROR, noiselevel=-1)
15645                                         for i in e[0]:
15646                                                 writemsg_level("    %s\n" % colorize("INFORM", i),
15647                                                         level=logging.ERROR, noiselevel=-1)
15648                                         writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
15649                                         return 1
15650                                 continue
15651                         msg = []
15652                         msg.append("'%s' is not a valid package atom." % (x,))
15653                         msg.append("Please check ebuild(5) for full details.")
15654                         writemsg_level("".join("!!! %s\n" % line for line in msg),
15655                                 level=logging.ERROR, noiselevel=-1)
15656                         return 1
15657
15658                 if myaction == "info":
15659                         return action_info(settings, trees, myopts, valid_atoms)
15660
15661                 validate_ebuild_environment(trees)
15662                 action_depclean(settings, trees, mtimedb["ldpath"],
15663                         myopts, myaction, valid_atoms, spinner)
15664                 if not (buildpkgonly or fetchonly or pretend):
15665                         post_emerge(root_config, myopts, mtimedb, os.EX_OK)
15666         # "update", "system", or just process files:
15667         else:
15668                 validate_ebuild_environment(trees)
15669                 if "--pretend" not in myopts:
15670                         display_news_notification(root_config, myopts)
15671                 retval = action_build(settings, trees, mtimedb,
15672                         myopts, myaction, myfiles, spinner)
15673                 root_config = trees[settings["ROOT"]]["root_config"]
15674                 post_emerge(root_config, myopts, mtimedb, retval)
15675
15676                 return retval